diff options
1380 files changed, 11491 insertions, 6649 deletions
diff --git a/Documentation/Changes b/Documentation/Changes index 6d8863004858..f447f0516f07 100644 --- a/Documentation/Changes +++ b/Documentation/Changes | |||
@@ -43,7 +43,7 @@ o udev 081 # udevd --version | |||
43 | o grub 0.93 # grub --version || grub-install --version | 43 | o grub 0.93 # grub --version || grub-install --version |
44 | o mcelog 0.6 # mcelog --version | 44 | o mcelog 0.6 # mcelog --version |
45 | o iptables 1.4.2 # iptables -V | 45 | o iptables 1.4.2 # iptables -V |
46 | o openssl & libcrypto 1.0.1k # openssl version | 46 | o openssl & libcrypto 1.0.0 # openssl version |
47 | 47 | ||
48 | 48 | ||
49 | Kernel compilation | 49 | Kernel compilation |
diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt index 0d5bc46dc167..ad6949bff2e3 100644 --- a/Documentation/device-mapper/snapshot.txt +++ b/Documentation/device-mapper/snapshot.txt | |||
@@ -41,9 +41,13 @@ useless and be disabled, returning errors. So it is important to monitor | |||
41 | the amount of free space and expand the <COW device> before it fills up. | 41 | the amount of free space and expand the <COW device> before it fills up. |
42 | 42 | ||
43 | <persistent?> is P (Persistent) or N (Not persistent - will not survive | 43 | <persistent?> is P (Persistent) or N (Not persistent - will not survive |
44 | after reboot). | 44 | after reboot). O (Overflow) can be added as a persistent store option |
45 | The difference is that for transient snapshots less metadata must be | 45 | to allow userspace to advertise its support for seeing "Overflow" in the |
46 | saved on disk - they can be kept in memory by the kernel. | 46 | snapshot status. So supported store types are "P", "PO" and "N". |
47 | |||
48 | The difference between persistent and transient is with transient | ||
49 | snapshots less metadata must be saved on disk - they can be kept in | ||
50 | memory by the kernel. | ||
47 | 51 | ||
48 | 52 | ||
49 | * snapshot-merge <origin> <COW device> <persistent> <chunksize> | 53 | * snapshot-merge <origin> <COW device> <persistent> <chunksize> |
diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt index ddfade40ac59..7803e77d85cb 100644 --- a/Documentation/devicetree/bindings/arm/gic-v3.txt +++ b/Documentation/devicetree/bindings/arm/gic-v3.txt | |||
@@ -57,6 +57,8 @@ used to route Message Signalled Interrupts (MSI) to the CPUs. | |||
57 | These nodes must have the following properties: | 57 | These nodes must have the following properties: |
58 | - compatible : Should at least contain "arm,gic-v3-its". | 58 | - compatible : Should at least contain "arm,gic-v3-its". |
59 | - msi-controller : Boolean property. Identifies the node as an MSI controller | 59 | - msi-controller : Boolean property. Identifies the node as an MSI controller |
60 | - #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device | ||
61 | which will generate the MSI. | ||
60 | - reg: Specifies the base physical address and size of the ITS | 62 | - reg: Specifies the base physical address and size of the ITS |
61 | registers. | 63 | registers. |
62 | 64 | ||
@@ -83,6 +85,7 @@ Examples: | |||
83 | gic-its@2c200000 { | 85 | gic-its@2c200000 { |
84 | compatible = "arm,gic-v3-its"; | 86 | compatible = "arm,gic-v3-its"; |
85 | msi-controller; | 87 | msi-controller; |
88 | #msi-cells = <1>; | ||
86 | reg = <0x0 0x2c200000 0 0x200000>; | 89 | reg = <0x0 0x2c200000 0 0x200000>; |
87 | }; | 90 | }; |
88 | }; | 91 | }; |
@@ -107,12 +110,14 @@ Examples: | |||
107 | gic-its@2c200000 { | 110 | gic-its@2c200000 { |
108 | compatible = "arm,gic-v3-its"; | 111 | compatible = "arm,gic-v3-its"; |
109 | msi-controller; | 112 | msi-controller; |
113 | #msi-cells = <1>; | ||
110 | reg = <0x0 0x2c200000 0 0x200000>; | 114 | reg = <0x0 0x2c200000 0 0x200000>; |
111 | }; | 115 | }; |
112 | 116 | ||
113 | gic-its@2c400000 { | 117 | gic-its@2c400000 { |
114 | compatible = "arm,gic-v3-its"; | 118 | compatible = "arm,gic-v3-its"; |
115 | msi-controller; | 119 | msi-controller; |
120 | #msi-cells = <1>; | ||
116 | reg = <0x0 0x2c400000 0 0x200000>; | 121 | reg = <0x0 0x2c400000 0 0x200000>; |
117 | }; | 122 | }; |
118 | }; | 123 | }; |
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt index a8274eabae2e..b8e41c148a3c 100644 --- a/Documentation/devicetree/bindings/arm/idle-states.txt +++ b/Documentation/devicetree/bindings/arm/idle-states.txt | |||
@@ -497,7 +497,7 @@ cpus { | |||
497 | }; | 497 | }; |
498 | 498 | ||
499 | idle-states { | 499 | idle-states { |
500 | entry-method = "arm,psci"; | 500 | entry-method = "psci"; |
501 | 501 | ||
502 | CPU_RETENTION_0_0: cpu-retention-0-0 { | 502 | CPU_RETENTION_0_0: cpu-retention-0-0 { |
503 | compatible = "arm,idle-state"; | 503 | compatible = "arm,idle-state"; |
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt index 5788d5cf1252..82d40e2505f6 100644 --- a/Documentation/devicetree/bindings/gpio/gpio.txt +++ b/Documentation/devicetree/bindings/gpio/gpio.txt | |||
@@ -16,7 +16,9 @@ properties, each containing a 'gpio-list': | |||
16 | GPIO properties should be named "[<name>-]gpios", with <name> being the purpose | 16 | GPIO properties should be named "[<name>-]gpios", with <name> being the purpose |
17 | of this GPIO for the device. While a non-existent <name> is considered valid | 17 | of this GPIO for the device. While a non-existent <name> is considered valid |
18 | for compatibility reasons (resolving to the "gpios" property), it is not allowed | 18 | for compatibility reasons (resolving to the "gpios" property), it is not allowed |
19 | for new bindings. | 19 | for new bindings. Also, GPIO properties named "[<name>-]gpio" are valid and old |
20 | bindings use it, but are only supported for compatibility reasons and should not | ||
21 | be used for newer bindings since it has been deprecated. | ||
20 | 22 | ||
21 | GPIO properties can contain one or more GPIO phandles, but only in exceptional | 23 | GPIO properties can contain one or more GPIO phandles, but only in exceptional |
22 | cases should they contain more than one. If your device uses several GPIOs with | 24 | cases should they contain more than one. If your device uses several GPIOs with |
diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt index c5933573e0f6..4a3679d54457 100644 --- a/Documentation/devicetree/bindings/iio/accel/bma180.txt +++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt | |||
@@ -1,10 +1,11 @@ | |||
1 | * Bosch BMA180 triaxial acceleration sensor | 1 | * Bosch BMA180 / BMA250 triaxial acceleration sensor |
2 | 2 | ||
3 | http://omapworld.com/BMA180_111_1002839.pdf | 3 | http://omapworld.com/BMA180_111_1002839.pdf |
4 | http://ae-bst.resource.bosch.com/media/products/dokumente/bma250/bst-bma250-ds002-05.pdf | ||
4 | 5 | ||
5 | Required properties: | 6 | Required properties: |
6 | 7 | ||
7 | - compatible : should be "bosch,bma180" | 8 | - compatible : should be "bosch,bma180" or "bosch,bma250" |
8 | - reg : the I2C address of the sensor | 9 | - reg : the I2C address of the sensor |
9 | 10 | ||
10 | Optional properties: | 11 | Optional properties: |
@@ -13,6 +14,9 @@ Optional properties: | |||
13 | 14 | ||
14 | - interrupts : interrupt mapping for GPIO IRQ, it should by configured with | 15 | - interrupts : interrupt mapping for GPIO IRQ, it should by configured with |
15 | flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING | 16 | flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING |
17 | For the bma250 the first interrupt listed must be the one | ||
18 | connected to the INT1 pin, the second (optional) interrupt | ||
19 | listed must be the one connected to the INT2 pin. | ||
16 | 20 | ||
17 | Example: | 21 | Example: |
18 | 22 | ||
diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt index 635a3b036630..8d91ba9ff2fd 100644 --- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt +++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt | |||
@@ -25,7 +25,7 @@ Example: | |||
25 | /* Cypress Gen3 touchpad */ | 25 | /* Cypress Gen3 touchpad */ |
26 | touchpad@67 { | 26 | touchpad@67 { |
27 | compatible = "cypress,cyapa"; | 27 | compatible = "cypress,cyapa"; |
28 | reg = <0x24>; | 28 | reg = <0x67>; |
29 | interrupt-parent = <&gpio>; | 29 | interrupt-parent = <&gpio>; |
30 | interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */ | 30 | interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */ |
31 | wakeup-source; | 31 | wakeup-source; |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt index 391717a68f3b..ec96b1f01478 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt | |||
@@ -4,8 +4,8 @@ The MISC interrupt controller is a secondary controller for lower priority | |||
4 | interrupt. | 4 | interrupt. |
5 | 5 | ||
6 | Required Properties: | 6 | Required Properties: |
7 | - compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" | 7 | - compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or |
8 | as fallback | 8 | "qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc" |
9 | - reg: Base address and size of the controllers memory area | 9 | - reg: Base address and size of the controllers memory area |
10 | - interrupt-parent: phandle of the parent interrupt controller. | 10 | - interrupt-parent: phandle of the parent interrupt controller. |
11 | - interrupts: Interrupt specifier for the controllers interrupt. | 11 | - interrupts: Interrupt specifier for the controllers interrupt. |
@@ -13,6 +13,9 @@ Required Properties: | |||
13 | - #interrupt-cells : Specifies the number of cells needed to encode interrupt | 13 | - #interrupt-cells : Specifies the number of cells needed to encode interrupt |
14 | source, should be 1 | 14 | source, should be 1 |
15 | 15 | ||
16 | Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x, | ||
17 | use ar7240 for all other SoCs. | ||
18 | |||
16 | Please refer to interrupts.txt in this directory for details of the common | 19 | Please refer to interrupts.txt in this directory for details of the common |
17 | Interrupt Controllers bindings used by client devices. | 20 | Interrupt Controllers bindings used by client devices. |
18 | 21 | ||
@@ -28,3 +31,16 @@ Example: | |||
28 | interrupt-controller; | 31 | interrupt-controller; |
29 | #interrupt-cells = <1>; | 32 | #interrupt-cells = <1>; |
30 | }; | 33 | }; |
34 | |||
35 | Another example: | ||
36 | |||
37 | interrupt-controller@18060010 { | ||
38 | compatible = "qca,ar9331-misc-intc", qca,ar7240-misc-intc"; | ||
39 | reg = <0x18060010 0x4>; | ||
40 | |||
41 | interrupt-parent = <&cpuintc>; | ||
42 | interrupts = <6>; | ||
43 | |||
44 | interrupt-controller; | ||
45 | #interrupt-cells = <1>; | ||
46 | }; | ||
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt index d8ef5bf50f11..7fab84b33531 100644 --- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt +++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt | |||
@@ -7,7 +7,8 @@ OHCI and EHCI controllers. | |||
7 | 7 | ||
8 | Required properties: | 8 | Required properties: |
9 | - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC; | 9 | - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC; |
10 | "renesas,pci-r8a7791" for the R8A7791 SoC. | 10 | "renesas,pci-r8a7791" for the R8A7791 SoC; |
11 | "renesas,pci-r8a7794" for the R8A7794 SoC. | ||
11 | - reg: A list of physical regions to access the device: the first is | 12 | - reg: A list of physical regions to access the device: the first is |
12 | the operational registers for the OHCI/EHCI controllers and the | 13 | the operational registers for the OHCI/EHCI controllers and the |
13 | second is for the bridge configuration and control registers. | 14 | second is for the bridge configuration and control registers. |
diff --git a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt index 32aa26f1e434..acbcb452a69a 100644 --- a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt | |||
@@ -2,7 +2,12 @@ PBIAS internal regulator for SD card dual voltage i/o pads on OMAP SoCs. | |||
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | - compatible: | 4 | - compatible: |
5 | - "ti,pbias-omap" for OMAP2, OMAP3, OMAP4, OMAP5, DRA7. | 5 | - should be "ti,pbias-dra7" for DRA7 |
6 | - should be "ti,pbias-omap2" for OMAP2 | ||
7 | - should be "ti,pbias-omap3" for OMAP3 | ||
8 | - should be "ti,pbias-omap4" for OMAP4 | ||
9 | - should be "ti,pbias-omap5" for OMAP5 | ||
10 | - "ti,pbias-omap" is deprecated | ||
6 | - reg: pbias register offset from syscon base and size of pbias register. | 11 | - reg: pbias register offset from syscon base and size of pbias register. |
7 | - syscon : phandle of the system control module | 12 | - syscon : phandle of the system control module |
8 | - regulator-name : should be | 13 | - regulator-name : should be |
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt index 8f771441be60..705075da2f10 100644 --- a/Documentation/devicetree/bindings/spi/sh-msiof.txt +++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt | |||
@@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings: | |||
51 | - renesas,tx-fifo-size : Overrides the default tx fifo size given in words | 51 | - renesas,tx-fifo-size : Overrides the default tx fifo size given in words |
52 | (default is 64) | 52 | (default is 64) |
53 | - renesas,rx-fifo-size : Overrides the default rx fifo size given in words | 53 | - renesas,rx-fifo-size : Overrides the default rx fifo size given in words |
54 | (default is 64, or 256 on R-Car Gen2) | 54 | (default is 64) |
55 | 55 | ||
56 | Pinctrl properties might be needed, too. See | 56 | Pinctrl properties might be needed, too. See |
57 | Documentation/devicetree/bindings/pinctrl/renesas,*. | 57 | Documentation/devicetree/bindings/pinctrl/renesas,*. |
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt index dcefc438272f..6160ffbcb3d3 100644 --- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt +++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt | |||
@@ -15,17 +15,18 @@ Required properties: | |||
15 | - interrupts: Should contain spi interrupt | 15 | - interrupts: Should contain spi interrupt |
16 | 16 | ||
17 | - clocks: phandles to input clocks. | 17 | - clocks: phandles to input clocks. |
18 | The first should be <&topckgen CLK_TOP_SPI_SEL>. | 18 | The first should be one of the following. It's PLL. |
19 | The second should be one of the following. | ||
20 | - <&clk26m>: specify parent clock 26MHZ. | 19 | - <&clk26m>: specify parent clock 26MHZ. |
21 | - <&topckgen CLK_TOP_SYSPLL3_D2>: specify parent clock 109MHZ. | 20 | - <&topckgen CLK_TOP_SYSPLL3_D2>: specify parent clock 109MHZ. |
22 | It's the default one. | 21 | It's the default one. |
23 | - <&topckgen CLK_TOP_SYSPLL4_D2>: specify parent clock 78MHZ. | 22 | - <&topckgen CLK_TOP_SYSPLL4_D2>: specify parent clock 78MHZ. |
24 | - <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104MHZ. | 23 | - <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104MHZ. |
25 | - <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78MHZ. | 24 | - <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78MHZ. |
25 | The second should be <&topckgen CLK_TOP_SPI_SEL>. It's clock mux. | ||
26 | The third is <&pericfg CLK_PERI_SPI0>. It's clock gate. | ||
26 | 27 | ||
27 | - clock-names: shall be "spi-clk" for the controller clock, and | 28 | - clock-names: shall be "parent-clk" for the parent clock, "sel-clk" for the |
28 | "parent-clk" for the parent clock. | 29 | muxes clock, and "spi-clk" for the clock gate. |
29 | 30 | ||
30 | Optional properties: | 31 | Optional properties: |
31 | - mediatek,pad-select: specify which pins group(ck/mi/mo/cs) spi | 32 | - mediatek,pad-select: specify which pins group(ck/mi/mo/cs) spi |
@@ -44,8 +45,11 @@ spi: spi@1100a000 { | |||
44 | #size-cells = <0>; | 45 | #size-cells = <0>; |
45 | reg = <0 0x1100a000 0 0x1000>; | 46 | reg = <0 0x1100a000 0 0x1000>; |
46 | interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>; | 47 | interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>; |
47 | clocks = <&topckgen CLK_TOP_SPI_SEL>, <&topckgen CLK_TOP_SYSPLL3_D2>; | 48 | clocks = <&topckgen CLK_TOP_SYSPLL3_D2>, |
48 | clock-names = "spi-clk", "parent-clk"; | 49 | <&topckgen CLK_TOP_SPI_SEL>, |
50 | <&pericfg CLK_PERI_SPI0>; | ||
51 | clock-names = "parent-clk", "sel-clk", "spi-clk"; | ||
52 | |||
49 | mediatek,pad-select = <0>; | 53 | mediatek,pad-select = <0>; |
50 | status = "disabled"; | 54 | status = "disabled"; |
51 | }; | 55 | }; |
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt index 8a49362dea6e..41b817f7b670 100644 --- a/Documentation/devicetree/bindings/thermal/thermal.txt +++ b/Documentation/devicetree/bindings/thermal/thermal.txt | |||
@@ -55,19 +55,11 @@ of heat dissipation). For example a fan's cooling states correspond to | |||
55 | the different fan speeds possible. Cooling states are referred to by | 55 | the different fan speeds possible. Cooling states are referred to by |
56 | single unsigned integers, where larger numbers mean greater heat | 56 | single unsigned integers, where larger numbers mean greater heat |
57 | dissipation. The precise set of cooling states associated with a device | 57 | dissipation. The precise set of cooling states associated with a device |
58 | (as referred to be the cooling-min-state and cooling-max-state | 58 | (as referred to by the cooling-min-level and cooling-max-level |
59 | properties) should be defined in a particular device's binding. | 59 | properties) should be defined in a particular device's binding. |
60 | For more examples of cooling devices, refer to the example sections below. | 60 | For more examples of cooling devices, refer to the example sections below. |
61 | 61 | ||
62 | Required properties: | 62 | Required properties: |
63 | - cooling-min-state: An integer indicating the smallest | ||
64 | Type: unsigned cooling state accepted. Typically 0. | ||
65 | Size: one cell | ||
66 | |||
67 | - cooling-max-state: An integer indicating the largest | ||
68 | Type: unsigned cooling state accepted. | ||
69 | Size: one cell | ||
70 | |||
71 | - #cooling-cells: Used to provide cooling device specific information | 63 | - #cooling-cells: Used to provide cooling device specific information |
72 | Type: unsigned while referring to it. Must be at least 2, in order | 64 | Type: unsigned while referring to it. Must be at least 2, in order |
73 | Size: one cell to specify minimum and maximum cooling state used | 65 | Size: one cell to specify minimum and maximum cooling state used |
@@ -77,6 +69,15 @@ Required properties: | |||
77 | See Cooling device maps section below for more details | 69 | See Cooling device maps section below for more details |
78 | on how consumers refer to cooling devices. | 70 | on how consumers refer to cooling devices. |
79 | 71 | ||
72 | Optional properties: | ||
73 | - cooling-min-level: An integer indicating the smallest | ||
74 | Type: unsigned cooling state accepted. Typically 0. | ||
75 | Size: one cell | ||
76 | |||
77 | - cooling-max-level: An integer indicating the largest | ||
78 | Type: unsigned cooling state accepted. | ||
79 | Size: one cell | ||
80 | |||
80 | * Trip points | 81 | * Trip points |
81 | 82 | ||
82 | The trip node is a node to describe a point in the temperature domain | 83 | The trip node is a node to describe a point in the temperature domain |
@@ -225,8 +226,8 @@ cpus { | |||
225 | 396000 950000 | 226 | 396000 950000 |
226 | 198000 850000 | 227 | 198000 850000 |
227 | >; | 228 | >; |
228 | cooling-min-state = <0>; | 229 | cooling-min-level = <0>; |
229 | cooling-max-state = <3>; | 230 | cooling-max-level = <3>; |
230 | #cooling-cells = <2>; /* min followed by max */ | 231 | #cooling-cells = <2>; /* min followed by max */ |
231 | }; | 232 | }; |
232 | ... | 233 | ... |
@@ -240,8 +241,8 @@ cpus { | |||
240 | */ | 241 | */ |
241 | fan0: fan@0x48 { | 242 | fan0: fan@0x48 { |
242 | ... | 243 | ... |
243 | cooling-min-state = <0>; | 244 | cooling-min-level = <0>; |
244 | cooling-max-state = <9>; | 245 | cooling-max-level = <9>; |
245 | #cooling-cells = <2>; /* min followed by max */ | 246 | #cooling-cells = <2>; /* min followed by max */ |
246 | }; | 247 | }; |
247 | }; | 248 | }; |
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt index d71ef07bca5d..a057b75ba4b5 100644 --- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt +++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt | |||
@@ -6,6 +6,7 @@ Required properties: | |||
6 | "lsi,zevio-usb" | 6 | "lsi,zevio-usb" |
7 | "qcom,ci-hdrc" | 7 | "qcom,ci-hdrc" |
8 | "chipidea,usb2" | 8 | "chipidea,usb2" |
9 | "xlnx,zynq-usb-2.20a" | ||
9 | - reg: base address and length of the registers | 10 | - reg: base address and length of the registers |
10 | - interrupts: interrupt for the USB controller | 11 | - interrupts: interrupt for the USB controller |
11 | 12 | ||
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt index 64a4ca6cf96f..7d48f63db44e 100644 --- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt +++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt | |||
@@ -5,6 +5,7 @@ Required properties: | |||
5 | - "renesas,usbhs-r8a7790" | 5 | - "renesas,usbhs-r8a7790" |
6 | - "renesas,usbhs-r8a7791" | 6 | - "renesas,usbhs-r8a7791" |
7 | - "renesas,usbhs-r8a7794" | 7 | - "renesas,usbhs-r8a7794" |
8 | - "renesas,usbhs-r8a7795" | ||
8 | - reg: Base address and length of the register for the USBHS | 9 | - reg: Base address and length of the register for the USBHS |
9 | - interrupts: Interrupt specifier for the USBHS | 10 | - interrupts: Interrupt specifier for the USBHS |
10 | - clocks: A list of phandle + clock specifier pairs | 11 | - clocks: A list of phandle + clock specifier pairs |
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index ac5f0c34ae00..82d2ac97af74 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt | |||
@@ -203,6 +203,7 @@ sitronix Sitronix Technology Corporation | |||
203 | skyworks Skyworks Solutions, Inc. | 203 | skyworks Skyworks Solutions, Inc. |
204 | smsc Standard Microsystems Corporation | 204 | smsc Standard Microsystems Corporation |
205 | snps Synopsys, Inc. | 205 | snps Synopsys, Inc. |
206 | socionext Socionext Inc. | ||
206 | solidrun SolidRun | 207 | solidrun SolidRun |
207 | solomon Solomon Systech Limited | 208 | solomon Solomon Systech Limited |
208 | sony Sony Corporation | 209 | sony Sony Corporation |
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt index b80606de545a..f59c43b6411b 100644 --- a/Documentation/gpio/board.txt +++ b/Documentation/gpio/board.txt | |||
@@ -21,8 +21,8 @@ exact way to do it depends on the GPIO controller providing the GPIOs, see the | |||
21 | device tree bindings for your controller. | 21 | device tree bindings for your controller. |
22 | 22 | ||
23 | GPIOs mappings are defined in the consumer device's node, in a property named | 23 | GPIOs mappings are defined in the consumer device's node, in a property named |
24 | <function>-gpios, where <function> is the function the driver will request | 24 | either <function>-gpios or <function>-gpio, where <function> is the function |
25 | through gpiod_get(). For example: | 25 | the driver will request through gpiod_get(). For example: |
26 | 26 | ||
27 | foo_device { | 27 | foo_device { |
28 | compatible = "acme,foo"; | 28 | compatible = "acme,foo"; |
@@ -31,7 +31,7 @@ through gpiod_get(). For example: | |||
31 | <&gpio 16 GPIO_ACTIVE_HIGH>, /* green */ | 31 | <&gpio 16 GPIO_ACTIVE_HIGH>, /* green */ |
32 | <&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */ | 32 | <&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */ |
33 | 33 | ||
34 | power-gpios = <&gpio 1 GPIO_ACTIVE_LOW>; | 34 | power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | This property will make GPIOs 15, 16 and 17 available to the driver under the | 37 | This property will make GPIOs 15, 16 and 17 available to the driver under the |
@@ -39,15 +39,24 @@ This property will make GPIOs 15, 16 and 17 available to the driver under the | |||
39 | 39 | ||
40 | struct gpio_desc *red, *green, *blue, *power; | 40 | struct gpio_desc *red, *green, *blue, *power; |
41 | 41 | ||
42 | red = gpiod_get_index(dev, "led", 0); | 42 | red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH); |
43 | green = gpiod_get_index(dev, "led", 1); | 43 | green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH); |
44 | blue = gpiod_get_index(dev, "led", 2); | 44 | blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH); |
45 | 45 | ||
46 | power = gpiod_get(dev, "power"); | 46 | power = gpiod_get(dev, "power", GPIOD_OUT_HIGH); |
47 | 47 | ||
48 | The led GPIOs will be active-high, while the power GPIO will be active-low (i.e. | 48 | The led GPIOs will be active-high, while the power GPIO will be active-low (i.e. |
49 | gpiod_is_active_low(power) will be true). | 49 | gpiod_is_active_low(power) will be true). |
50 | 50 | ||
51 | The second parameter of the gpiod_get() functions, the con_id string, has to be | ||
52 | the <function>-prefix of the GPIO suffixes ("gpios" or "gpio", automatically | ||
53 | looked up by the gpiod functions internally) used in the device tree. With above | ||
54 | "led-gpios" example, use the prefix without the "-" as con_id parameter: "led". | ||
55 | |||
56 | Internally, the GPIO subsystem prefixes the GPIO suffix ("gpios" or "gpio") | ||
57 | with the string passed in con_id to get the resulting string | ||
58 | (snprintf(... "%s-%s", con_id, gpio_suffixes[]). | ||
59 | |||
51 | ACPI | 60 | ACPI |
52 | ---- | 61 | ---- |
53 | ACPI also supports function names for GPIOs in a similar fashion to DT. | 62 | ACPI also supports function names for GPIOs in a similar fashion to DT. |
@@ -142,13 +151,14 @@ The driver controlling "foo.0" will then be able to obtain its GPIOs as follows: | |||
142 | 151 | ||
143 | struct gpio_desc *red, *green, *blue, *power; | 152 | struct gpio_desc *red, *green, *blue, *power; |
144 | 153 | ||
145 | red = gpiod_get_index(dev, "led", 0); | 154 | red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH); |
146 | green = gpiod_get_index(dev, "led", 1); | 155 | green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH); |
147 | blue = gpiod_get_index(dev, "led", 2); | 156 | blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH); |
148 | 157 | ||
149 | power = gpiod_get(dev, "power"); | 158 | power = gpiod_get(dev, "power", GPIOD_OUT_HIGH); |
150 | gpiod_direction_output(power, 1); | ||
151 | 159 | ||
152 | Since the "power" GPIO is mapped as active-low, its actual signal will be 0 | 160 | Since the "led" GPIOs are mapped as active-high, this example will switch their |
153 | after this code. Contrary to the legacy integer GPIO interface, the active-low | 161 | signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped |
154 | property is handled during mapping and is thus transparent to GPIO consumers. | 162 | as active-low, its actual signal will be 0 after this code. Contrary to the legacy |
163 | integer GPIO interface, the active-low property is handled during mapping and is | ||
164 | thus transparent to GPIO consumers. | ||
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt index a206639454ab..e000502fde20 100644 --- a/Documentation/gpio/consumer.txt +++ b/Documentation/gpio/consumer.txt | |||
@@ -39,6 +39,9 @@ device that displays digits), an additional index argument can be specified: | |||
39 | const char *con_id, unsigned int idx, | 39 | const char *con_id, unsigned int idx, |
40 | enum gpiod_flags flags) | 40 | enum gpiod_flags flags) |
41 | 41 | ||
42 | For a more detailed description of the con_id parameter in the DeviceTree case | ||
43 | see Documentation/gpio/board.txt | ||
44 | |||
42 | The flags parameter is used to optionally specify a direction and initial value | 45 | The flags parameter is used to optionally specify a direction and initial value |
43 | for the GPIO. Values can be: | 46 | for the GPIO. Values can be: |
44 | 47 | ||
diff --git a/Documentation/hwmon/nct6775 b/Documentation/hwmon/nct6775 index f0dd3d2fec96..76add4c9cd68 100644 --- a/Documentation/hwmon/nct6775 +++ b/Documentation/hwmon/nct6775 | |||
@@ -32,6 +32,10 @@ Supported chips: | |||
32 | Prefix: 'nct6792' | 32 | Prefix: 'nct6792' |
33 | Addresses scanned: ISA address retrieved from Super I/O registers | 33 | Addresses scanned: ISA address retrieved from Super I/O registers |
34 | Datasheet: Available from Nuvoton upon request | 34 | Datasheet: Available from Nuvoton upon request |
35 | * Nuvoton NCT6793D | ||
36 | Prefix: 'nct6793' | ||
37 | Addresses scanned: ISA address retrieved from Super I/O registers | ||
38 | Datasheet: Available from Nuvoton upon request | ||
35 | 39 | ||
36 | Authors: | 40 | Authors: |
37 | Guenter Roeck <linux@roeck-us.net> | 41 | Guenter Roeck <linux@roeck-us.net> |
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt index b85d000faeb4..c51f1146f3bd 100644 --- a/Documentation/input/multi-touch-protocol.txt +++ b/Documentation/input/multi-touch-protocol.txt | |||
@@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is | |||
361 | ABS_MT_POSITION_X := T_X | 361 | ABS_MT_POSITION_X := T_X |
362 | ABS_MT_POSITION_Y := T_Y | 362 | ABS_MT_POSITION_Y := T_Y |
363 | ABS_MT_TOOL_X := C_X | 363 | ABS_MT_TOOL_X := C_X |
364 | ABS_MT_TOOL_X := C_Y | 364 | ABS_MT_TOOL_Y := C_Y |
365 | 365 | ||
366 | Unfortunately, there is not enough information to specify both the touching | 366 | Unfortunately, there is not enough information to specify both the touching |
367 | ellipse and the tool ellipse, so one has to resort to approximations. One | 367 | ellipse and the tool ellipse, so one has to resort to approximations. One |
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt new file mode 100644 index 000000000000..031ef4a63485 --- /dev/null +++ b/Documentation/networking/vrf.txt | |||
@@ -0,0 +1,96 @@ | |||
1 | Virtual Routing and Forwarding (VRF) | ||
2 | ==================================== | ||
3 | The VRF device combined with ip rules provides the ability to create virtual | ||
4 | routing and forwarding domains (aka VRFs, VRF-lite to be specific) in the | ||
5 | Linux network stack. One use case is the multi-tenancy problem where each | ||
6 | tenant has their own unique routing tables and in the very least need | ||
7 | different default gateways. | ||
8 | |||
9 | Processes can be "VRF aware" by binding a socket to the VRF device. Packets | ||
10 | through the socket then use the routing table associated with the VRF | ||
11 | device. An important feature of the VRF device implementation is that it | ||
12 | impacts only Layer 3 and above so L2 tools (e.g., LLDP) are not affected | ||
13 | (ie., they do not need to be run in each VRF). The design also allows | ||
14 | the use of higher priority ip rules (Policy Based Routing, PBR) to take | ||
15 | precedence over the VRF device rules directing specific traffic as desired. | ||
16 | |||
17 | In addition, VRF devices allow VRFs to be nested within namespaces. For | ||
18 | example network namespaces provide separation of network interfaces at L1 | ||
19 | (Layer 1 separation), VLANs on the interfaces within a namespace provide | ||
20 | L2 separation and then VRF devices provide L3 separation. | ||
21 | |||
22 | Design | ||
23 | ------ | ||
24 | A VRF device is created with an associated route table. Network interfaces | ||
25 | are then enslaved to a VRF device: | ||
26 | |||
27 | +-----------------------------+ | ||
28 | | vrf-blue | ===> route table 10 | ||
29 | +-----------------------------+ | ||
30 | | | | | ||
31 | +------+ +------+ +-------------+ | ||
32 | | eth1 | | eth2 | ... | bond1 | | ||
33 | +------+ +------+ +-------------+ | ||
34 | | | | ||
35 | +------+ +------+ | ||
36 | | eth8 | | eth9 | | ||
37 | +------+ +------+ | ||
38 | |||
39 | Packets received on an enslaved device and are switched to the VRF device | ||
40 | using an rx_handler which gives the impression that packets flow through | ||
41 | the VRF device. Similarly on egress routing rules are used to send packets | ||
42 | to the VRF device driver before getting sent out the actual interface. This | ||
43 | allows tcpdump on a VRF device to capture all packets into and out of the | ||
44 | VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied | ||
45 | using the VRF device to specify rules that apply to the VRF domain as a whole. | ||
46 | |||
47 | [1] Packets in the forwarded state do not flow through the device, so those | ||
48 | packets are not seen by tcpdump. Will revisit this limitation in a | ||
49 | future release. | ||
50 | |||
51 | [2] Iptables on ingress is limited to NF_INET_PRE_ROUTING only with skb->dev | ||
52 | set to real ingress device and egress is limited to NF_INET_POST_ROUTING. | ||
53 | Will revisit this limitation in a future release. | ||
54 | |||
55 | |||
56 | Setup | ||
57 | ----- | ||
58 | 1. VRF device is created with an association to a FIB table. | ||
59 | e.g, ip link add vrf-blue type vrf table 10 | ||
60 | ip link set dev vrf-blue up | ||
61 | |||
62 | 2. Rules are added that send lookups to the associated FIB table when the | ||
63 | iif or oif is the VRF device. e.g., | ||
64 | ip ru add oif vrf-blue table 10 | ||
65 | ip ru add iif vrf-blue table 10 | ||
66 | |||
67 | Set the default route for the table (and hence default route for the VRF). | ||
68 | e.g, ip route add table 10 prohibit default | ||
69 | |||
70 | 3. Enslave L3 interfaces to a VRF device. | ||
71 | e.g, ip link set dev eth1 master vrf-blue | ||
72 | |||
73 | Local and connected routes for enslaved devices are automatically moved to | ||
74 | the table associated with VRF device. Any additional routes depending on | ||
75 | the enslaved device will need to be reinserted following the enslavement. | ||
76 | |||
77 | 4. Additional VRF routes are added to associated table. | ||
78 | e.g., ip route add table 10 ... | ||
79 | |||
80 | |||
81 | Applications | ||
82 | ------------ | ||
83 | Applications that are to work within a VRF need to bind their socket to the | ||
84 | VRF device: | ||
85 | |||
86 | setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, dev, strlen(dev)+1); | ||
87 | |||
88 | or to specify the output device using cmsg and IP_PKTINFO. | ||
89 | |||
90 | |||
91 | Limitations | ||
92 | ----------- | ||
93 | VRF device currently only works for IPv4. Support for IPv6 is under development. | ||
94 | |||
95 | Index of original ingress interface is not available via cmsg. Will address | ||
96 | soon. | ||
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt index 62328d76b55b..b0e911e0e8f5 100644 --- a/Documentation/power/pci.txt +++ b/Documentation/power/pci.txt | |||
@@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned | |||
979 | (alternatively, the runtime_suspend() callback will have to check if the | 979 | (alternatively, the runtime_suspend() callback will have to check if the |
980 | device should really be suspended and return -EAGAIN if that is not the case). | 980 | device should really be suspended and return -EAGAIN if that is not the case). |
981 | 981 | ||
982 | The runtime PM of PCI devices is disabled by default. It is also blocked by | 982 | The runtime PM of PCI devices is enabled by default by the PCI core. PCI |
983 | pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI | 983 | device drivers do not need to enable it and should not attempt to do so. |
984 | driver implements the runtime PM callbacks and intends to use the runtime PM | 984 | However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid() |
985 | framework provided by the PM core and the PCI subsystem, it should enable this | 985 | helper function. In addition to that, the runtime PM usage counter of |
986 | feature by executing the pm_runtime_enable() helper function. However, the | 986 | each PCI device is incremented by local_pci_probe() before executing the |
987 | driver should not call the pm_runtime_allow() helper function unblocking | 987 | probe callback provided by the device's driver. |
988 | the runtime PM of the device. Instead, it should allow user space or some | 988 | |
989 | platform-specific code to do that (user space can do it via sysfs), although | 989 | If a PCI driver implements the runtime PM callbacks and intends to use the |
990 | once it has called pm_runtime_enable(), it must be prepared to handle the | 990 | runtime PM framework provided by the PM core and the PCI subsystem, it needs |
991 | to decrement the device's runtime PM usage counter in its probe callback | ||
992 | function. If it doesn't do that, the counter will always be different from | ||
993 | zero for the device and it will never be runtime-suspended. The simplest | ||
994 | way to do that is by calling pm_runtime_put_noidle(), but if the driver | ||
995 | wants to schedule an autosuspend right away, for example, it may call | ||
996 | pm_runtime_put_autosuspend() instead for this purpose. Generally, it | ||
997 | just needs to call a function that decrements the devices usage counter | ||
998 | from its probe routine to make runtime PM work for the device. | ||
999 | |||
1000 | It is important to remember that the driver's runtime_suspend() callback | ||
1001 | may be executed right after the usage counter has been decremented, because | ||
1002 | user space may already have cuased the pm_runtime_allow() helper function | ||
1003 | unblocking the runtime PM of the device to run via sysfs, so the driver must | ||
1004 | be prepared to cope with that. | ||
1005 | |||
1006 | The driver itself should not call pm_runtime_allow(), though. Instead, it | ||
1007 | should let user space or some platform-specific code do that (user space can | ||
1008 | do it via sysfs as stated above), but it must be prepared to handle the | ||
991 | runtime PM of the device correctly as soon as pm_runtime_allow() is called | 1009 | runtime PM of the device correctly as soon as pm_runtime_allow() is called |
992 | (which may happen at any time). [It also is possible that user space causes | 1010 | (which may happen at any time, even before the driver is loaded). |
993 | pm_runtime_allow() to be called via sysfs before the driver is loaded, so in | 1011 | |
994 | fact the driver has to be prepared to handle the runtime PM of the device as | 1012 | When the driver's remove callback runs, it has to balance the decrementation |
995 | soon as it calls pm_runtime_enable().] | 1013 | of the device's runtime PM usage counter at the probe time. For this reason, |
1014 | if it has decremented the counter in its probe callback, it must run | ||
1015 | pm_runtime_get_noresume() in its remove callback. [Since the core carries | ||
1016 | out a runtime resume of the device and bumps up the device's usage counter | ||
1017 | before running the driver's remove callback, the runtime PM of the device | ||
1018 | is effectively disabled for the duration of the remove execution and all | ||
1019 | runtime PM helper functions incrementing the device's usage counter are | ||
1020 | then effectively equivalent to pm_runtime_get_noresume().] | ||
996 | 1021 | ||
997 | The runtime PM framework works by processing requests to suspend or resume | 1022 | The runtime PM framework works by processing requests to suspend or resume |
998 | devices, or to check if they are idle (in which cases it is reasonable to | 1023 | devices, or to check if they are idle (in which cases it is reasonable to |
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c index 2bc8abc57fa0..6c6247aaa7b9 100644 --- a/Documentation/ptp/testptp.c +++ b/Documentation/ptp/testptp.c | |||
@@ -18,6 +18,7 @@ | |||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 | */ | 19 | */ |
20 | #define _GNU_SOURCE | 20 | #define _GNU_SOURCE |
21 | #define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */ | ||
21 | #include <errno.h> | 22 | #include <errno.h> |
22 | #include <fcntl.h> | 23 | #include <fcntl.h> |
23 | #include <inttypes.h> | 24 | #include <inttypes.h> |
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt index f4cb0b2d5cd7..477927becacb 100644 --- a/Documentation/static-keys.txt +++ b/Documentation/static-keys.txt | |||
@@ -15,8 +15,8 @@ The updated API replacements are: | |||
15 | 15 | ||
16 | DEFINE_STATIC_KEY_TRUE(key); | 16 | DEFINE_STATIC_KEY_TRUE(key); |
17 | DEFINE_STATIC_KEY_FALSE(key); | 17 | DEFINE_STATIC_KEY_FALSE(key); |
18 | static_key_likely() | 18 | static_branch_likely() |
19 | statick_key_unlikely() | 19 | static_branch_unlikely() |
20 | 20 | ||
21 | 0) Abstract | 21 | 0) Abstract |
22 | 22 | ||
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 6294b5186ae5..809ab6efcc74 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt | |||
@@ -54,13 +54,15 @@ default_qdisc | |||
54 | -------------- | 54 | -------------- |
55 | 55 | ||
56 | The default queuing discipline to use for network devices. This allows | 56 | The default queuing discipline to use for network devices. This allows |
57 | overriding the default queue discipline of pfifo_fast with an | 57 | overriding the default of pfifo_fast with an alternative. Since the default |
58 | alternative. Since the default queuing discipline is created with the | 58 | queuing discipline is created without additional parameters so is best suited |
59 | no additional parameters so is best suited to queuing disciplines that | 59 | to queuing disciplines that work well without configuration like stochastic |
60 | work well without configuration like stochastic fair queue (sfq), | 60 | fair queue (sfq), CoDel (codel) or fair queue CoDel (fq_codel). Don't use |
61 | CoDel (codel) or fair queue CoDel (fq_codel). Don't use queuing disciplines | 61 | queuing disciplines like Hierarchical Token Bucket or Deficit Round Robin |
62 | like Hierarchical Token Bucket or Deficit Round Robin which require setting | 62 | which require setting up classes and bandwidths. Note that physical multiqueue |
63 | up classes and bandwidths. | 63 | interfaces still use mq as root qdisc, which in turn uses this default for its |
64 | leaves. Virtual devices (like e.g. lo or veth) ignore this setting and instead | ||
65 | default to noqueue. | ||
64 | Default: pfifo_fast | 66 | Default: pfifo_fast |
65 | 67 | ||
66 | busy_read | 68 | busy_read |
diff --git a/Documentation/thermal/power_allocator.txt b/Documentation/thermal/power_allocator.txt index c3797b529991..a1ce2235f121 100644 --- a/Documentation/thermal/power_allocator.txt +++ b/Documentation/thermal/power_allocator.txt | |||
@@ -4,7 +4,7 @@ Power allocator governor tunables | |||
4 | Trip points | 4 | Trip points |
5 | ----------- | 5 | ----------- |
6 | 6 | ||
7 | The governor requires the following two passive trip points: | 7 | The governor works optimally with the following two passive trip points: |
8 | 8 | ||
9 | 1. "switch on" trip point: temperature above which the governor | 9 | 1. "switch on" trip point: temperature above which the governor |
10 | control loop starts operating. This is the first passive trip | 10 | control loop starts operating. This is the first passive trip |
diff --git a/MAINTAINERS b/MAINTAINERS index 7ba7ab749c85..b8577ad9b8a2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -615,9 +615,8 @@ F: Documentation/hwmon/fam15h_power | |||
615 | F: drivers/hwmon/fam15h_power.c | 615 | F: drivers/hwmon/fam15h_power.c |
616 | 616 | ||
617 | AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER | 617 | AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER |
618 | M: Thomas Dahlmann <dahlmann.thomas@arcor.de> | ||
619 | L: linux-geode@lists.infradead.org (moderated for non-subscribers) | 618 | L: linux-geode@lists.infradead.org (moderated for non-subscribers) |
620 | S: Supported | 619 | S: Orphan |
621 | F: drivers/usb/gadget/udc/amd5536udc.* | 620 | F: drivers/usb/gadget/udc/amd5536udc.* |
622 | 621 | ||
623 | AMD GEODE PROCESSOR/CHIPSET SUPPORT | 622 | AMD GEODE PROCESSOR/CHIPSET SUPPORT |
@@ -808,6 +807,13 @@ S: Maintained | |||
808 | F: drivers/video/fbdev/arcfb.c | 807 | F: drivers/video/fbdev/arcfb.c |
809 | F: drivers/video/fbdev/core/fb_defio.c | 808 | F: drivers/video/fbdev/core/fb_defio.c |
810 | 809 | ||
810 | ARCNET NETWORK LAYER | ||
811 | M: Michael Grzeschik <m.grzeschik@pengutronix.de> | ||
812 | L: netdev@vger.kernel.org | ||
813 | S: Maintained | ||
814 | F: drivers/net/arcnet/ | ||
815 | F: include/uapi/linux/if_arcnet.h | ||
816 | |||
811 | ARM MFM AND FLOPPY DRIVERS | 817 | ARM MFM AND FLOPPY DRIVERS |
812 | M: Ian Molton <spyro@f2s.com> | 818 | M: Ian Molton <spyro@f2s.com> |
813 | S: Maintained | 819 | S: Maintained |
@@ -3394,7 +3400,6 @@ F: drivers/staging/dgnc/ | |||
3394 | 3400 | ||
3395 | DIGI EPCA PCI PRODUCTS | 3401 | DIGI EPCA PCI PRODUCTS |
3396 | M: Lidza Louina <lidza.louina@gmail.com> | 3402 | M: Lidza Louina <lidza.louina@gmail.com> |
3397 | M: Mark Hounschell <markh@compro.net> | ||
3398 | M: Daeseok Youn <daeseok.youn@gmail.com> | 3403 | M: Daeseok Youn <daeseok.youn@gmail.com> |
3399 | L: driverdev-devel@linuxdriverproject.org | 3404 | L: driverdev-devel@linuxdriverproject.org |
3400 | S: Maintained | 3405 | S: Maintained |
@@ -3586,6 +3591,13 @@ F: drivers/gpu/drm/i915/ | |||
3586 | F: include/drm/i915* | 3591 | F: include/drm/i915* |
3587 | F: include/uapi/drm/i915* | 3592 | F: include/uapi/drm/i915* |
3588 | 3593 | ||
3594 | DRM DRIVERS FOR ATMEL HLCDC | ||
3595 | M: Boris Brezillon <boris.brezillon@free-electrons.com> | ||
3596 | L: dri-devel@lists.freedesktop.org | ||
3597 | S: Supported | ||
3598 | F: drivers/gpu/drm/atmel-hlcdc/ | ||
3599 | F: Documentation/devicetree/bindings/drm/atmel/ | ||
3600 | |||
3589 | DRM DRIVERS FOR EXYNOS | 3601 | DRM DRIVERS FOR EXYNOS |
3590 | M: Inki Dae <inki.dae@samsung.com> | 3602 | M: Inki Dae <inki.dae@samsung.com> |
3591 | M: Joonyoung Shim <jy0922.shim@samsung.com> | 3603 | M: Joonyoung Shim <jy0922.shim@samsung.com> |
@@ -3614,6 +3626,14 @@ S: Maintained | |||
3614 | F: drivers/gpu/drm/imx/ | 3626 | F: drivers/gpu/drm/imx/ |
3615 | F: Documentation/devicetree/bindings/drm/imx/ | 3627 | F: Documentation/devicetree/bindings/drm/imx/ |
3616 | 3628 | ||
3629 | DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets) | ||
3630 | M: Patrik Jakobsson <patrik.r.jakobsson@gmail.com> | ||
3631 | L: dri-devel@lists.freedesktop.org | ||
3632 | T: git git://github.com/patjak/drm-gma500 | ||
3633 | S: Maintained | ||
3634 | F: drivers/gpu/drm/gma500 | ||
3635 | F: include/drm/gma500* | ||
3636 | |||
3617 | DRM DRIVERS FOR NVIDIA TEGRA | 3637 | DRM DRIVERS FOR NVIDIA TEGRA |
3618 | M: Thierry Reding <thierry.reding@gmail.com> | 3638 | M: Thierry Reding <thierry.reding@gmail.com> |
3619 | M: Terje Bergström <tbergstrom@nvidia.com> | 3639 | M: Terje Bergström <tbergstrom@nvidia.com> |
@@ -3998,7 +4018,7 @@ S: Maintained | |||
3998 | F: sound/usb/misc/ua101.c | 4018 | F: sound/usb/misc/ua101.c |
3999 | 4019 | ||
4000 | EXTENSIBLE FIRMWARE INTERFACE (EFI) | 4020 | EXTENSIBLE FIRMWARE INTERFACE (EFI) |
4001 | M: Matt Fleming <matt.fleming@intel.com> | 4021 | M: Matt Fleming <matt@codeblueprint.co.uk> |
4002 | L: linux-efi@vger.kernel.org | 4022 | L: linux-efi@vger.kernel.org |
4003 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git | 4023 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git |
4004 | S: Maintained | 4024 | S: Maintained |
@@ -4013,7 +4033,7 @@ F: include/linux/efi*.h | |||
4013 | EFI VARIABLE FILESYSTEM | 4033 | EFI VARIABLE FILESYSTEM |
4014 | M: Matthew Garrett <matthew.garrett@nebula.com> | 4034 | M: Matthew Garrett <matthew.garrett@nebula.com> |
4015 | M: Jeremy Kerr <jk@ozlabs.org> | 4035 | M: Jeremy Kerr <jk@ozlabs.org> |
4016 | M: Matt Fleming <matt.fleming@intel.com> | 4036 | M: Matt Fleming <matt@codeblueprint.co.uk> |
4017 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git | 4037 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git |
4018 | L: linux-efi@vger.kernel.org | 4038 | L: linux-efi@vger.kernel.org |
4019 | S: Maintained | 4039 | S: Maintained |
@@ -5952,7 +5972,7 @@ F: virt/kvm/ | |||
5952 | KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V | 5972 | KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V |
5953 | M: Joerg Roedel <joro@8bytes.org> | 5973 | M: Joerg Roedel <joro@8bytes.org> |
5954 | L: kvm@vger.kernel.org | 5974 | L: kvm@vger.kernel.org |
5955 | W: http://kvm.qumranet.com | 5975 | W: http://www.linux-kvm.org/ |
5956 | S: Maintained | 5976 | S: Maintained |
5957 | F: arch/x86/include/asm/svm.h | 5977 | F: arch/x86/include/asm/svm.h |
5958 | F: arch/x86/kvm/svm.c | 5978 | F: arch/x86/kvm/svm.c |
@@ -5960,7 +5980,7 @@ F: arch/x86/kvm/svm.c | |||
5960 | KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC | 5980 | KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC |
5961 | M: Alexander Graf <agraf@suse.com> | 5981 | M: Alexander Graf <agraf@suse.com> |
5962 | L: kvm-ppc@vger.kernel.org | 5982 | L: kvm-ppc@vger.kernel.org |
5963 | W: http://kvm.qumranet.com | 5983 | W: http://www.linux-kvm.org/ |
5964 | T: git git://github.com/agraf/linux-2.6.git | 5984 | T: git git://github.com/agraf/linux-2.6.git |
5965 | S: Supported | 5985 | S: Supported |
5966 | F: arch/powerpc/include/asm/kvm* | 5986 | F: arch/powerpc/include/asm/kvm* |
@@ -6452,11 +6472,11 @@ F: drivers/hwmon/ltc4261.c | |||
6452 | LTP (Linux Test Project) | 6472 | LTP (Linux Test Project) |
6453 | M: Mike Frysinger <vapier@gentoo.org> | 6473 | M: Mike Frysinger <vapier@gentoo.org> |
6454 | M: Cyril Hrubis <chrubis@suse.cz> | 6474 | M: Cyril Hrubis <chrubis@suse.cz> |
6455 | M: Wanlong Gao <gaowanlong@cn.fujitsu.com> | 6475 | M: Wanlong Gao <wanlong.gao@gmail.com> |
6456 | M: Jan Stancek <jstancek@redhat.com> | 6476 | M: Jan Stancek <jstancek@redhat.com> |
6457 | M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com> | 6477 | M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com> |
6458 | M: Alexey Kodanev <alexey.kodanev@oracle.com> | 6478 | M: Alexey Kodanev <alexey.kodanev@oracle.com> |
6459 | L: ltp-list@lists.sourceforge.net (subscribers-only) | 6479 | L: ltp@lists.linux.it (subscribers-only) |
6460 | W: http://linux-test-project.github.io/ | 6480 | W: http://linux-test-project.github.io/ |
6461 | T: git git://github.com/linux-test-project/ltp.git | 6481 | T: git git://github.com/linux-test-project/ltp.git |
6462 | S: Maintained | 6482 | S: Maintained |
@@ -8500,7 +8520,6 @@ F: Documentation/networking/LICENSE.qla3xxx | |||
8500 | F: drivers/net/ethernet/qlogic/qla3xxx.* | 8520 | F: drivers/net/ethernet/qlogic/qla3xxx.* |
8501 | 8521 | ||
8502 | QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER | 8522 | QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER |
8503 | M: Shahed Shaikh <shahed.shaikh@qlogic.com> | ||
8504 | M: Dept-GELinuxNICDev@qlogic.com | 8523 | M: Dept-GELinuxNICDev@qlogic.com |
8505 | L: netdev@vger.kernel.org | 8524 | L: netdev@vger.kernel.org |
8506 | S: Supported | 8525 | S: Supported |
@@ -9097,6 +9116,15 @@ S: Supported | |||
9097 | F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt | 9116 | F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt |
9098 | F: drivers/net/ethernet/synopsys/dwc_eth_qos.c | 9117 | F: drivers/net/ethernet/synopsys/dwc_eth_qos.c |
9099 | 9118 | ||
9119 | SYNOPSYS DESIGNWARE I2C DRIVER | ||
9120 | M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | ||
9121 | M: Jarkko Nikula <jarkko.nikula@linux.intel.com> | ||
9122 | M: Mika Westerberg <mika.westerberg@linux.intel.com> | ||
9123 | L: linux-i2c@vger.kernel.org | ||
9124 | S: Maintained | ||
9125 | F: drivers/i2c/busses/i2c-designware-* | ||
9126 | F: include/linux/platform_data/i2c-designware.h | ||
9127 | |||
9100 | SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER | 9128 | SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER |
9101 | M: Seungwon Jeon <tgih.jun@samsung.com> | 9129 | M: Seungwon Jeon <tgih.jun@samsung.com> |
9102 | M: Jaehoon Chung <jh80.chung@samsung.com> | 9130 | M: Jaehoon Chung <jh80.chung@samsung.com> |
@@ -9904,13 +9932,12 @@ F: drivers/staging/media/lirc/ | |||
9904 | STAGING - LUSTRE PARALLEL FILESYSTEM | 9932 | STAGING - LUSTRE PARALLEL FILESYSTEM |
9905 | M: Oleg Drokin <oleg.drokin@intel.com> | 9933 | M: Oleg Drokin <oleg.drokin@intel.com> |
9906 | M: Andreas Dilger <andreas.dilger@intel.com> | 9934 | M: Andreas Dilger <andreas.dilger@intel.com> |
9907 | L: HPDD-discuss@lists.01.org (moderated for non-subscribers) | 9935 | L: lustre-devel@lists.lustre.org (moderated for non-subscribers) |
9908 | W: http://lustre.opensfs.org/ | 9936 | W: http://wiki.lustre.org/ |
9909 | S: Maintained | 9937 | S: Maintained |
9910 | F: drivers/staging/lustre | 9938 | F: drivers/staging/lustre |
9911 | 9939 | ||
9912 | STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec) | 9940 | STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec) |
9913 | M: Julian Andres Klode <jak@jak-linux.org> | ||
9914 | M: Marc Dietrich <marvin24@gmx.de> | 9941 | M: Marc Dietrich <marvin24@gmx.de> |
9915 | L: ac100@lists.launchpad.net (moderated for non-subscribers) | 9942 | L: ac100@lists.launchpad.net (moderated for non-subscribers) |
9916 | L: linux-tegra@vger.kernel.org | 9943 | L: linux-tegra@vger.kernel.org |
@@ -10338,6 +10365,16 @@ F: include/uapi/linux/thermal.h | |||
10338 | F: include/linux/cpu_cooling.h | 10365 | F: include/linux/cpu_cooling.h |
10339 | F: Documentation/devicetree/bindings/thermal/ | 10366 | F: Documentation/devicetree/bindings/thermal/ |
10340 | 10367 | ||
10368 | THERMAL/CPU_COOLING | ||
10369 | M: Amit Daniel Kachhap <amit.kachhap@gmail.com> | ||
10370 | M: Viresh Kumar <viresh.kumar@linaro.org> | ||
10371 | M: Javi Merino <javi.merino@arm.com> | ||
10372 | L: linux-pm@vger.kernel.org | ||
10373 | S: Supported | ||
10374 | F: Documentation/thermal/cpu-cooling-api.txt | ||
10375 | F: drivers/thermal/cpu_cooling.c | ||
10376 | F: include/linux/cpu_cooling.h | ||
10377 | |||
10341 | THINGM BLINK(1) USB RGB LED DRIVER | 10378 | THINGM BLINK(1) USB RGB LED DRIVER |
10342 | M: Vivien Didelot <vivien.didelot@savoirfairelinux.com> | 10379 | M: Vivien Didelot <vivien.didelot@savoirfairelinux.com> |
10343 | S: Maintained | 10380 | S: Maintained |
@@ -11187,7 +11224,7 @@ F: drivers/vlynq/vlynq.c | |||
11187 | F: include/linux/vlynq.h | 11224 | F: include/linux/vlynq.h |
11188 | 11225 | ||
11189 | VME SUBSYSTEM | 11226 | VME SUBSYSTEM |
11190 | M: Martyn Welch <martyn.welch@ge.com> | 11227 | M: Martyn Welch <martyn@welchs.me.uk> |
11191 | M: Manohar Vanga <manohar.vanga@gmail.com> | 11228 | M: Manohar Vanga <manohar.vanga@gmail.com> |
11192 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 11229 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
11193 | L: devel@driverdev.osuosl.org | 11230 | L: devel@driverdev.osuosl.org |
@@ -11239,7 +11276,6 @@ VOLTAGE AND CURRENT REGULATOR FRAMEWORK | |||
11239 | M: Liam Girdwood <lgirdwood@gmail.com> | 11276 | M: Liam Girdwood <lgirdwood@gmail.com> |
11240 | M: Mark Brown <broonie@kernel.org> | 11277 | M: Mark Brown <broonie@kernel.org> |
11241 | L: linux-kernel@vger.kernel.org | 11278 | L: linux-kernel@vger.kernel.org |
11242 | W: http://opensource.wolfsonmicro.com/node/15 | ||
11243 | W: http://www.slimlogic.co.uk/?p=48 | 11279 | W: http://www.slimlogic.co.uk/?p=48 |
11244 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git | 11280 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git |
11245 | S: Supported | 11281 | S: Supported |
@@ -11253,6 +11289,7 @@ L: netdev@vger.kernel.org | |||
11253 | S: Maintained | 11289 | S: Maintained |
11254 | F: drivers/net/vrf.c | 11290 | F: drivers/net/vrf.c |
11255 | F: include/net/vrf.h | 11291 | F: include/net/vrf.h |
11292 | F: Documentation/networking/vrf.txt | ||
11256 | 11293 | ||
11257 | VT1211 HARDWARE MONITOR DRIVER | 11294 | VT1211 HARDWARE MONITOR DRIVER |
11258 | M: Juerg Haefliger <juergh@gmail.com> | 11295 | M: Juerg Haefliger <juergh@gmail.com> |
@@ -11364,21 +11401,10 @@ W: http://oops.ghostprotocols.net:81/blog | |||
11364 | S: Maintained | 11401 | S: Maintained |
11365 | F: drivers/net/wireless/wl3501* | 11402 | F: drivers/net/wireless/wl3501* |
11366 | 11403 | ||
11367 | WM97XX TOUCHSCREEN DRIVERS | ||
11368 | M: Mark Brown <broonie@kernel.org> | ||
11369 | M: Liam Girdwood <lrg@slimlogic.co.uk> | ||
11370 | L: linux-input@vger.kernel.org | ||
11371 | T: git git://opensource.wolfsonmicro.com/linux-2.6-touch | ||
11372 | W: http://opensource.wolfsonmicro.com/node/7 | ||
11373 | S: Supported | ||
11374 | F: drivers/input/touchscreen/*wm97* | ||
11375 | F: include/linux/wm97xx.h | ||
11376 | |||
11377 | WOLFSON MICROELECTRONICS DRIVERS | 11404 | WOLFSON MICROELECTRONICS DRIVERS |
11378 | L: patches@opensource.wolfsonmicro.com | 11405 | L: patches@opensource.wolfsonmicro.com |
11379 | T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc | 11406 | T: git https://github.com/CirrusLogic/linux-drivers.git |
11380 | T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus | 11407 | W: https://github.com/CirrusLogic/linux-drivers/wiki |
11381 | W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices | ||
11382 | S: Supported | 11408 | S: Supported |
11383 | F: Documentation/hwmon/wm83?? | 11409 | F: Documentation/hwmon/wm83?? |
11384 | F: arch/arm/mach-s3c64xx/mach-crag6410* | 11410 | F: arch/arm/mach-s3c64xx/mach-crag6410* |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
@@ -1,8 +1,8 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 3 | 2 | PATCHLEVEL = 3 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc6 |
5 | NAME = Hurr durr I'ma sheep | 5 | NAME = Blurry Fish Butt |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
8 | # To see a list of typical targets execute "make help" | 8 | # To see a list of typical targets execute "make help" |
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index f05bdb4b1cb9..ff4049155c84 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h | |||
@@ -297,7 +297,9 @@ static inline void __iomem * ioremap_nocache(unsigned long offset, | |||
297 | unsigned long size) | 297 | unsigned long size) |
298 | { | 298 | { |
299 | return ioremap(offset, size); | 299 | return ioremap(offset, size); |
300 | } | 300 | } |
301 | |||
302 | #define ioremap_uc ioremap_nocache | ||
301 | 303 | ||
302 | static inline void iounmap(volatile void __iomem *addr) | 304 | static inline void iounmap(volatile void __iomem *addr) |
303 | { | 305 | { |
diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h index 6b340d0f1521..902e6ab00a06 100644 --- a/arch/alpha/include/asm/word-at-a-time.h +++ b/arch/alpha/include/asm/word-at-a-time.h | |||
@@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits) | |||
52 | #endif | 52 | #endif |
53 | } | 53 | } |
54 | 54 | ||
55 | #define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1) | ||
56 | |||
55 | #endif /* _ASM_WORD_AT_A_TIME_H */ | 57 | #endif /* _ASM_WORD_AT_A_TIME_H */ |
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 2804648c8ff4..2d6efcff3bf3 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c | |||
@@ -117,6 +117,6 @@ handle_irq(int irq) | |||
117 | } | 117 | } |
118 | 118 | ||
119 | irq_enter(); | 119 | irq_enter(); |
120 | generic_handle_irq_desc(irq, desc); | 120 | generic_handle_irq_desc(desc); |
121 | irq_exit(); | 121 | irq_exit(); |
122 | } | 122 | } |
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index cded02c890aa..5f387ee5b5c5 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c | |||
@@ -242,7 +242,12 @@ pci_restore_srm_config(void) | |||
242 | 242 | ||
243 | void pcibios_fixup_bus(struct pci_bus *bus) | 243 | void pcibios_fixup_bus(struct pci_bus *bus) |
244 | { | 244 | { |
245 | struct pci_dev *dev; | 245 | struct pci_dev *dev = bus->self; |
246 | |||
247 | if (pci_has_flag(PCI_PROBE_ONLY) && dev && | ||
248 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
249 | pci_read_bridge_bases(bus); | ||
250 | } | ||
246 | 251 | ||
247 | list_for_each_entry(dev, &bus->devices, bus_list) { | 252 | list_for_each_entry(dev, &bus->devices, bus_list) { |
248 | pdev_save_srm_config(dev); | 253 | pdev_save_srm_config(dev); |
diff --git a/arch/alpha/lib/udelay.c b/arch/alpha/lib/udelay.c index 69d52aa37bae..f2d81ff38aa6 100644 --- a/arch/alpha/lib/udelay.c +++ b/arch/alpha/lib/udelay.c | |||
@@ -30,6 +30,7 @@ __delay(int loops) | |||
30 | " bgt %0,1b" | 30 | " bgt %0,1b" |
31 | : "=&r" (tmp), "=r" (loops) : "1"(loops)); | 31 | : "=&r" (tmp), "=r" (loops) : "1"(loops)); |
32 | } | 32 | } |
33 | EXPORT_SYMBOL(__delay); | ||
33 | 34 | ||
34 | #ifdef CONFIG_SMP | 35 | #ifdef CONFIG_SMP |
35 | #define LPJ cpu_data[smp_processor_id()].loops_per_jiffy | 36 | #define LPJ cpu_data[smp_processor_id()].loops_per_jiffy |
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 7611b10a2d23..0b10ef2a4372 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild | |||
@@ -48,4 +48,5 @@ generic-y += types.h | |||
48 | generic-y += ucontext.h | 48 | generic-y += ucontext.h |
49 | generic-y += user.h | 49 | generic-y += user.h |
50 | generic-y += vga.h | 50 | generic-y += vga.h |
51 | generic-y += word-at-a-time.h | ||
51 | generic-y += xor.h | 52 | generic-y += xor.h |
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index d9e44b62df05..4ffd1855f1bd 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c | |||
@@ -252,7 +252,7 @@ static struct irq_chip idu_irq_chip = { | |||
252 | 252 | ||
253 | static int idu_first_irq; | 253 | static int idu_first_irq; |
254 | 254 | ||
255 | static void idu_cascade_isr(unsigned int __core_irq, struct irq_desc *desc) | 255 | static void idu_cascade_isr(struct irq_desc *desc) |
256 | { | 256 | { |
257 | struct irq_domain *domain = irq_desc_get_handler_data(desc); | 257 | struct irq_domain *domain = irq_desc_get_handler_data(desc); |
258 | unsigned int core_irq = irq_desc_get_irq(desc); | 258 | unsigned int core_irq = irq_desc_get_irq(desc); |
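Note: the mcip.c hunk above is one of several in this section (it8152, locomo and sa1111 follow later with the same pattern) where chained flow handlers lose their "unsigned int irq" argument. Below is a minimal sketch of the resulting handler shape; the names example_cascade_isr and example_first_irq are illustrative only, and the body just mirrors the calls already visible in the hunk.

	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	/* Hypothetical cascade base, standing in for idu_first_irq above. */
	static int example_first_irq;

	/* The handler now receives only the descriptor; when the Linux irq
	 * number is still needed, it is recovered from the descriptor. */
	static void example_cascade_isr(struct irq_desc *desc)
	{
		struct irq_domain *domain = irq_desc_get_handler_data(desc);
		unsigned int core_irq = irq_desc_get_irq(desc);

		generic_handle_irq(irq_find_mapping(domain,
						    core_irq - example_first_irq));
	}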
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 7451b447cc2d..2c2b28ee4811 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -54,6 +54,14 @@ AS += -EL | |||
54 | LD += -EL | 54 | LD += -EL |
55 | endif | 55 | endif |
56 | 56 | ||
57 | # | ||
58 | # The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and | ||
59 | # later may result in code being generated that handles signed short and signed | ||
60 | # char struct members incorrectly. So disable it. | ||
61 | # (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932) | ||
62 | # | ||
63 | KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra) | ||
64 | |||
57 | # This selects which instruction set is used. | 65 | # This selects which instruction set is used. |
58 | # Note that GCC does not numerically define an architecture version | 66 | # Note that GCC does not numerically define an architecture version |
59 | # macro, but instead defines a whole series of macros which makes | 67 | # macro, but instead defines a whole series of macros which makes |
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 233159d2eaab..bb8fa023d574 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile | |||
@@ -578,7 +578,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \ | |||
578 | sun4i-a10-hackberry.dtb \ | 578 | sun4i-a10-hackberry.dtb \ |
579 | sun4i-a10-hyundai-a7hd.dtb \ | 579 | sun4i-a10-hyundai-a7hd.dtb \ |
580 | sun4i-a10-inet97fv2.dtb \ | 580 | sun4i-a10-inet97fv2.dtb \ |
581 | sun4i-a10-itead-iteaduino-plus.dts \ | 581 | sun4i-a10-itead-iteaduino-plus.dtb \ |
582 | sun4i-a10-jesurun-q5.dtb \ | 582 | sun4i-a10-jesurun-q5.dtb \ |
583 | sun4i-a10-marsboard.dtb \ | 583 | sun4i-a10-marsboard.dtb \ |
584 | sun4i-a10-mini-xplus.dtb \ | 584 | sun4i-a10-mini-xplus.dtb \ |
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi index 4d28fc3aac69..5dd084f3c81c 100644 --- a/arch/arm/boot/dts/am335x-phycore-som.dtsi +++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi | |||
@@ -252,10 +252,10 @@ | |||
252 | }; | 252 | }; |
253 | 253 | ||
254 | vdd1_reg: regulator@2 { | 254 | vdd1_reg: regulator@2 { |
255 | /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ | 255 | /* VDD_MPU voltage limits 0.95V - 1.325V with +/-4% tolerance */ |
256 | regulator-name = "vdd_mpu"; | 256 | regulator-name = "vdd_mpu"; |
257 | regulator-min-microvolt = <912500>; | 257 | regulator-min-microvolt = <912500>; |
258 | regulator-max-microvolt = <1312500>; | 258 | regulator-max-microvolt = <1378000>; |
259 | regulator-boot-on; | 259 | regulator-boot-on; |
260 | regulator-always-on; | 260 | regulator-always-on; |
261 | }; | 261 | }; |
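Note: the new vdd_mpu ceiling follows from the updated comment once the +/-4% tolerance is applied; the arithmetic below is added here for clarity and is not part of the patch.

	1.325 V * 1.04 = 1.378 V  ->  regulator-max-microvolt = <1378000>
	0.950 V * 0.96 = 0.912 V, roughly the unchanged regulator-min-microvolt = <912500>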
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 3a05b94f59ed..568adf5efde0 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts | |||
@@ -98,13 +98,6 @@ | |||
98 | pinctrl-0 = <&extcon_usb1_pins>; | 98 | pinctrl-0 = <&extcon_usb1_pins>; |
99 | }; | 99 | }; |
100 | 100 | ||
101 | extcon_usb2: extcon_usb2 { | ||
102 | compatible = "linux,extcon-usb-gpio"; | ||
103 | id-gpio = <&gpio7 24 GPIO_ACTIVE_HIGH>; | ||
104 | pinctrl-names = "default"; | ||
105 | pinctrl-0 = <&extcon_usb2_pins>; | ||
106 | }; | ||
107 | |||
108 | hdmi0: connector { | 101 | hdmi0: connector { |
109 | compatible = "hdmi-connector"; | 102 | compatible = "hdmi-connector"; |
110 | label = "hdmi"; | 103 | label = "hdmi"; |
@@ -326,12 +319,6 @@ | |||
326 | >; | 319 | >; |
327 | }; | 320 | }; |
328 | 321 | ||
329 | extcon_usb2_pins: extcon_usb2_pins { | ||
330 | pinctrl-single,pins = < | ||
331 | 0x3e8 (PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_ctsn.gpio7_24 */ | ||
332 | >; | ||
333 | }; | ||
334 | |||
335 | tpd12s015_pins: pinmux_tpd12s015_pins { | 322 | tpd12s015_pins: pinmux_tpd12s015_pins { |
336 | pinctrl-single,pins = < | 323 | pinctrl-single,pins = < |
337 | 0x3b0 (PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */ | 324 | 0x3b0 (PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */ |
@@ -432,7 +419,7 @@ | |||
432 | }; | 419 | }; |
433 | 420 | ||
434 | ldo3_reg: ldo3 { | 421 | ldo3_reg: ldo3 { |
435 | /* VDDA_1V8_PHY */ | 422 | /* VDDA_1V8_PHYA */ |
436 | regulator-name = "ldo3"; | 423 | regulator-name = "ldo3"; |
437 | regulator-min-microvolt = <1800000>; | 424 | regulator-min-microvolt = <1800000>; |
438 | regulator-max-microvolt = <1800000>; | 425 | regulator-max-microvolt = <1800000>; |
@@ -440,6 +427,15 @@ | |||
440 | regulator-boot-on; | 427 | regulator-boot-on; |
441 | }; | 428 | }; |
442 | 429 | ||
430 | ldo4_reg: ldo4 { | ||
431 | /* VDDA_1V8_PHYB */ | ||
432 | regulator-name = "ldo4"; | ||
433 | regulator-min-microvolt = <1800000>; | ||
434 | regulator-max-microvolt = <1800000>; | ||
435 | regulator-always-on; | ||
436 | regulator-boot-on; | ||
437 | }; | ||
438 | |||
443 | ldo9_reg: ldo9 { | 439 | ldo9_reg: ldo9 { |
444 | /* VDD_RTC */ | 440 | /* VDD_RTC */ |
445 | regulator-name = "ldo9"; | 441 | regulator-name = "ldo9"; |
@@ -495,6 +491,14 @@ | |||
495 | gpio-controller; | 491 | gpio-controller; |
496 | #gpio-cells = <2>; | 492 | #gpio-cells = <2>; |
497 | }; | 493 | }; |
494 | |||
495 | extcon_usb2: tps659038_usb { | ||
496 | compatible = "ti,palmas-usb-vid"; | ||
497 | ti,enable-vbus-detection; | ||
498 | ti,enable-id-detection; | ||
499 | id-gpios = <&gpio7 24 GPIO_ACTIVE_HIGH>; | ||
500 | }; | ||
501 | |||
498 | }; | 502 | }; |
499 | 503 | ||
500 | tmp102: tmp102@48 { | 504 | tmp102: tmp102@48 { |
@@ -517,7 +521,8 @@ | |||
517 | mcp_rtc: rtc@6f { | 521 | mcp_rtc: rtc@6f { |
518 | compatible = "microchip,mcp7941x"; | 522 | compatible = "microchip,mcp7941x"; |
519 | reg = <0x6f>; | 523 | reg = <0x6f>; |
520 | interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>; /* IRQ_SYS_1N */ | 524 | interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>, |
525 | <&dra7_pmx_core 0x424>; | ||
521 | 526 | ||
522 | pinctrl-names = "default"; | 527 | pinctrl-names = "default"; |
523 | pinctrl-0 = <&mcp79410_pins_default>; | 528 | pinctrl-0 = <&mcp79410_pins_default>; |
@@ -579,7 +584,6 @@ | |||
579 | pinctrl-0 = <&mmc1_pins_default>; | 584 | pinctrl-0 = <&mmc1_pins_default>; |
580 | 585 | ||
581 | vmmc-supply = <&ldo1_reg>; | 586 | vmmc-supply = <&ldo1_reg>; |
582 | vmmc_aux-supply = <&vdd_3v3>; | ||
583 | bus-width = <4>; | 587 | bus-width = <4>; |
584 | cd-gpios = <&gpio6 27 0>; /* gpio 219 */ | 588 | cd-gpios = <&gpio6 27 0>; /* gpio 219 */ |
585 | }; | 589 | }; |
@@ -623,6 +627,14 @@ | |||
623 | }; | 627 | }; |
624 | 628 | ||
625 | &usb2 { | 629 | &usb2 { |
630 | /* | ||
631 | * Stand alone usage is peripheral only. | ||
632 | * However, with some resistor modifications | ||
633 | * this port can be used via expansion connectors | ||
634 | * as "host" or "dual-role". If so, provide | ||
635 | * the necessary dr_mode override in the expansion | ||
636 | * board's DT. | ||
637 | */ | ||
626 | dr_mode = "peripheral"; | 638 | dr_mode = "peripheral"; |
627 | }; | 639 | }; |
628 | 640 | ||
@@ -681,7 +693,7 @@ | |||
681 | 693 | ||
682 | &hdmi { | 694 | &hdmi { |
683 | status = "ok"; | 695 | status = "ok"; |
684 | vdda-supply = <&ldo3_reg>; | 696 | vdda-supply = <&ldo4_reg>; |
685 | 697 | ||
686 | pinctrl-names = "default"; | 698 | pinctrl-names = "default"; |
687 | pinctrl-0 = <&hdmi_pins>; | 699 | pinctrl-0 = <&hdmi_pins>; |
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts index 92bacd3c8fab..109fd4711647 100644 --- a/arch/arm/boot/dts/dm8148-evm.dts +++ b/arch/arm/boot/dts/dm8148-evm.dts | |||
@@ -19,10 +19,10 @@ | |||
19 | 19 | ||
20 | &cpsw_emac0 { | 20 | &cpsw_emac0 { |
21 | phy_id = <&davinci_mdio>, <0>; | 21 | phy_id = <&davinci_mdio>, <0>; |
22 | phy-mode = "mii"; | 22 | phy-mode = "rgmii"; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | &cpsw_emac1 { | 25 | &cpsw_emac1 { |
26 | phy_id = <&davinci_mdio>, <1>; | 26 | phy_id = <&davinci_mdio>, <1>; |
27 | phy-mode = "mii"; | 27 | phy-mode = "rgmii"; |
28 | }; | 28 | }; |
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts index 8c4bbc7573df..79838dd8dee7 100644 --- a/arch/arm/boot/dts/dm8148-t410.dts +++ b/arch/arm/boot/dts/dm8148-t410.dts | |||
@@ -8,7 +8,7 @@ | |||
8 | #include "dm814x.dtsi" | 8 | #include "dm814x.dtsi" |
9 | 9 | ||
10 | / { | 10 | / { |
11 | model = "DM8148 EVM"; | 11 | model = "HP t410 Smart Zero Client"; |
12 | compatible = "hp,t410", "ti,dm8148"; | 12 | compatible = "hp,t410", "ti,dm8148"; |
13 | 13 | ||
14 | memory { | 14 | memory { |
@@ -19,10 +19,10 @@ | |||
19 | 19 | ||
20 | &cpsw_emac0 { | 20 | &cpsw_emac0 { |
21 | phy_id = <&davinci_mdio>, <0>; | 21 | phy_id = <&davinci_mdio>, <0>; |
22 | phy-mode = "mii"; | 22 | phy-mode = "rgmii"; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | &cpsw_emac1 { | 25 | &cpsw_emac1 { |
26 | phy_id = <&davinci_mdio>, <1>; | 26 | phy_id = <&davinci_mdio>, <1>; |
27 | phy-mode = "mii"; | 27 | phy-mode = "rgmii"; |
28 | }; | 28 | }; |
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi index 972c9c9e885b..7988b42e5764 100644 --- a/arch/arm/boot/dts/dm814x.dtsi +++ b/arch/arm/boot/dts/dm814x.dtsi | |||
@@ -181,9 +181,9 @@ | |||
181 | ti,hwmods = "timer3"; | 181 | ti,hwmods = "timer3"; |
182 | }; | 182 | }; |
183 | 183 | ||
184 | control: control@160000 { | 184 | control: control@140000 { |
185 | compatible = "ti,dm814-scm", "simple-bus"; | 185 | compatible = "ti,dm814-scm", "simple-bus"; |
186 | reg = <0x160000 0x16d000>; | 186 | reg = <0x140000 0x16d000>; |
187 | #address-cells = <1>; | 187 | #address-cells = <1>; |
188 | #size-cells = <1>; | 188 | #size-cells = <1>; |
189 | ranges = <0 0x160000 0x16d000>; | 189 | ranges = <0 0x160000 0x16d000>; |
@@ -321,9 +321,9 @@ | |||
321 | mac-address = [ 00 00 00 00 00 00 ]; | 321 | mac-address = [ 00 00 00 00 00 00 ]; |
322 | }; | 322 | }; |
323 | 323 | ||
324 | phy_sel: cpsw-phy-sel@0x48160650 { | 324 | phy_sel: cpsw-phy-sel@48140650 { |
325 | compatible = "ti,am3352-cpsw-phy-sel"; | 325 | compatible = "ti,am3352-cpsw-phy-sel"; |
326 | reg= <0x48160650 0x4>; | 326 | reg= <0x48140650 0x4>; |
327 | reg-names = "gmii-sel"; | 327 | reg-names = "gmii-sel"; |
328 | }; | 328 | }; |
329 | }; | 329 | }; |
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 5d65db9ebc2b..e289c706d27d 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi | |||
@@ -120,9 +120,10 @@ | |||
120 | reg = <0x0 0x1400>; | 120 | reg = <0x0 0x1400>; |
121 | #address-cells = <1>; | 121 | #address-cells = <1>; |
122 | #size-cells = <1>; | 122 | #size-cells = <1>; |
123 | ranges = <0 0x0 0x1400>; | ||
123 | 124 | ||
124 | pbias_regulator: pbias_regulator { | 125 | pbias_regulator: pbias_regulator { |
125 | compatible = "ti,pbias-omap"; | 126 | compatible = "ti,pbias-dra7", "ti,pbias-omap"; |
126 | reg = <0xe00 0x4>; | 127 | reg = <0xe00 0x4>; |
127 | syscon = <&scm_conf>; | 128 | syscon = <&scm_conf>; |
128 | pbias_mmc_reg: pbias_mmc_omap5 { | 129 | pbias_mmc_reg: pbias_mmc_omap5 { |
@@ -1417,7 +1418,7 @@ | |||
1417 | ti,irqs-safe-map = <0>; | 1418 | ti,irqs-safe-map = <0>; |
1418 | }; | 1419 | }; |
1419 | 1420 | ||
1420 | mac: ethernet@4a100000 { | 1421 | mac: ethernet@48484000 { |
1421 | compatible = "ti,dra7-cpsw","ti,cpsw"; | 1422 | compatible = "ti,dra7-cpsw","ti,cpsw"; |
1422 | ti,hwmods = "gmac"; | 1423 | ti,hwmods = "gmac"; |
1423 | clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>; | 1424 | clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>; |
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi index ca0e3c15977f..294cfe40388d 100644 --- a/arch/arm/boot/dts/exynos4412.dtsi +++ b/arch/arm/boot/dts/exynos4412.dtsi | |||
@@ -98,6 +98,7 @@ | |||
98 | opp-hz = /bits/ 64 <800000000>; | 98 | opp-hz = /bits/ 64 <800000000>; |
99 | opp-microvolt = <1000000>; | 99 | opp-microvolt = <1000000>; |
100 | clock-latency-ns = <200000>; | 100 | clock-latency-ns = <200000>; |
101 | opp-suspend; | ||
101 | }; | 102 | }; |
102 | opp07 { | 103 | opp07 { |
103 | opp-hz = /bits/ 64 <900000000>; | 104 | opp-hz = /bits/ 64 <900000000>; |
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 15aea760c1da..c625e71217aa 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts | |||
@@ -197,6 +197,7 @@ | |||
197 | regulator-name = "P1.8V_LDO_OUT10"; | 197 | regulator-name = "P1.8V_LDO_OUT10"; |
198 | regulator-min-microvolt = <1800000>; | 198 | regulator-min-microvolt = <1800000>; |
199 | regulator-max-microvolt = <1800000>; | 199 | regulator-max-microvolt = <1800000>; |
200 | regulator-always-on; | ||
200 | }; | 201 | }; |
201 | 202 | ||
202 | ldo11_reg: LDO11 { | 203 | ldo11_reg: LDO11 { |
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index df9aee92ecf4..1b3d6c769a3c 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi | |||
@@ -1117,7 +1117,7 @@ | |||
1117 | interrupt-parent = <&combiner>; | 1117 | interrupt-parent = <&combiner>; |
1118 | interrupts = <3 0>; | 1118 | interrupts = <3 0>; |
1119 | clock-names = "sysmmu", "master"; | 1119 | clock-names = "sysmmu", "master"; |
1120 | clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>; | 1120 | clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>; |
1121 | power-domains = <&disp_pd>; | 1121 | power-domains = <&disp_pd>; |
1122 | #iommu-cells = <0>; | 1122 | #iommu-cells = <0>; |
1123 | }; | 1123 | }; |
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi index 79ffdfe712aa..3b43e57845ae 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi | |||
@@ -472,7 +472,6 @@ | |||
472 | */ | 472 | */ |
473 | pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>; | 473 | pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>; |
474 | pinctrl-names = "default"; | 474 | pinctrl-names = "default"; |
475 | samsung,pwm-outputs = <0>; | ||
476 | status = "okay"; | 475 | status = "okay"; |
477 | }; | 476 | }; |
478 | 477 | ||
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts index 66e47de5e826..96d7eede412e 100644 --- a/arch/arm/boot/dts/imx53-qsrb.dts +++ b/arch/arm/boot/dts/imx53-qsrb.dts | |||
@@ -36,7 +36,7 @@ | |||
36 | pinctrl-0 = <&pinctrl_pmic>; | 36 | pinctrl-0 = <&pinctrl_pmic>; |
37 | reg = <0x08>; | 37 | reg = <0x08>; |
38 | interrupt-parent = <&gpio5>; | 38 | interrupt-parent = <&gpio5>; |
39 | interrupts = <23 0x8>; | 39 | interrupts = <23 IRQ_TYPE_LEVEL_HIGH>; |
40 | regulators { | 40 | regulators { |
41 | sw1_reg: sw1a { | 41 | sw1_reg: sw1a { |
42 | regulator-name = "SW1"; | 42 | regulator-name = "SW1"; |
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index c3e3ca9362fb..cd170376eaca 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <dt-bindings/clock/imx5-clock.h> | 15 | #include <dt-bindings/clock/imx5-clock.h> |
16 | #include <dt-bindings/gpio/gpio.h> | 16 | #include <dt-bindings/gpio/gpio.h> |
17 | #include <dt-bindings/input/input.h> | 17 | #include <dt-bindings/input/input.h> |
18 | #include <dt-bindings/interrupt-controller/irq.h> | ||
18 | 19 | ||
19 | / { | 20 | / { |
20 | aliases { | 21 | aliases { |
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi index 3373fd958e95..a50356243888 100644 --- a/arch/arm/boot/dts/imx6qdl-rex.dtsi +++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi | |||
@@ -35,7 +35,6 @@ | |||
35 | compatible = "regulator-fixed"; | 35 | compatible = "regulator-fixed"; |
36 | reg = <1>; | 36 | reg = <1>; |
37 | pinctrl-names = "default"; | 37 | pinctrl-names = "default"; |
38 | pinctrl-0 = <&pinctrl_usbh1>; | ||
39 | regulator-name = "usbh1_vbus"; | 38 | regulator-name = "usbh1_vbus"; |
40 | regulator-min-microvolt = <5000000>; | 39 | regulator-min-microvolt = <5000000>; |
41 | regulator-max-microvolt = <5000000>; | 40 | regulator-max-microvolt = <5000000>; |
@@ -47,7 +46,6 @@ | |||
47 | compatible = "regulator-fixed"; | 46 | compatible = "regulator-fixed"; |
48 | reg = <2>; | 47 | reg = <2>; |
49 | pinctrl-names = "default"; | 48 | pinctrl-names = "default"; |
50 | pinctrl-0 = <&pinctrl_usbotg>; | ||
51 | regulator-name = "usb_otg_vbus"; | 49 | regulator-name = "usb_otg_vbus"; |
52 | regulator-min-microvolt = <5000000>; | 50 | regulator-min-microvolt = <5000000>; |
53 | regulator-max-microvolt = <5000000>; | 51 | regulator-max-microvolt = <5000000>; |
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi index 2390f387c271..798dda072b2a 100644 --- a/arch/arm/boot/dts/omap2430.dtsi +++ b/arch/arm/boot/dts/omap2430.dtsi | |||
@@ -56,6 +56,7 @@ | |||
56 | reg = <0x270 0x240>; | 56 | reg = <0x270 0x240>; |
57 | #address-cells = <1>; | 57 | #address-cells = <1>; |
58 | #size-cells = <1>; | 58 | #size-cells = <1>; |
59 | ranges = <0 0x270 0x240>; | ||
59 | 60 | ||
60 | scm_clocks: clocks { | 61 | scm_clocks: clocks { |
61 | #address-cells = <1>; | 62 | #address-cells = <1>; |
@@ -63,7 +64,7 @@ | |||
63 | }; | 64 | }; |
64 | 65 | ||
65 | pbias_regulator: pbias_regulator { | 66 | pbias_regulator: pbias_regulator { |
66 | compatible = "ti,pbias-omap"; | 67 | compatible = "ti,pbias-omap2", "ti,pbias-omap"; |
67 | reg = <0x230 0x4>; | 68 | reg = <0x230 0x4>; |
68 | syscon = <&scm_conf>; | 69 | syscon = <&scm_conf>; |
69 | pbias_mmc_reg: pbias_mmc_omap2430 { | 70 | pbias_mmc_reg: pbias_mmc_omap2430 { |
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts index a5474113cd50..67659a0ed13e 100644 --- a/arch/arm/boot/dts/omap3-beagle.dts +++ b/arch/arm/boot/dts/omap3-beagle.dts | |||
@@ -202,7 +202,7 @@ | |||
202 | 202 | ||
203 | tfp410_pins: pinmux_tfp410_pins { | 203 | tfp410_pins: pinmux_tfp410_pins { |
204 | pinctrl-single,pins = < | 204 | pinctrl-single,pins = < |
205 | 0x194 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */ | 205 | 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */ |
206 | >; | 206 | >; |
207 | }; | 207 | }; |
208 | 208 | ||
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi index d5e5cd449b16..2230e1c03320 100644 --- a/arch/arm/boot/dts/omap3-igep.dtsi +++ b/arch/arm/boot/dts/omap3-igep.dtsi | |||
@@ -78,12 +78,6 @@ | |||
78 | >; | 78 | >; |
79 | }; | 79 | }; |
80 | 80 | ||
81 | smsc9221_pins: pinmux_smsc9221_pins { | ||
82 | pinctrl-single,pins = < | ||
83 | 0x1a2 (PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */ | ||
84 | >; | ||
85 | }; | ||
86 | |||
87 | i2c1_pins: pinmux_i2c1_pins { | 81 | i2c1_pins: pinmux_i2c1_pins { |
88 | pinctrl-single,pins = < | 82 | pinctrl-single,pins = < |
89 | 0x18a (PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ | 83 | 0x18a (PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ |
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi index e458c2185e3c..5ad688c57a00 100644 --- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi +++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi | |||
@@ -156,6 +156,12 @@ | |||
156 | OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */ | 156 | OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */ |
157 | >; | 157 | >; |
158 | }; | 158 | }; |
159 | |||
160 | smsc9221_pins: pinmux_smsc9221_pins { | ||
161 | pinctrl-single,pins = < | ||
162 | OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */ | ||
163 | >; | ||
164 | }; | ||
159 | }; | 165 | }; |
160 | 166 | ||
161 | &omap3_pmx_core2 { | 167 | &omap3_pmx_core2 { |
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index 69a40cfc1f29..8a2b25332b8c 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi | |||
@@ -113,10 +113,22 @@ | |||
113 | }; | 113 | }; |
114 | 114 | ||
115 | scm_conf: scm_conf@270 { | 115 | scm_conf: scm_conf@270 { |
116 | compatible = "syscon"; | 116 | compatible = "syscon", "simple-bus"; |
117 | reg = <0x270 0x330>; | 117 | reg = <0x270 0x330>; |
118 | #address-cells = <1>; | 118 | #address-cells = <1>; |
119 | #size-cells = <1>; | 119 | #size-cells = <1>; |
120 | ranges = <0 0x270 0x330>; | ||
121 | |||
122 | pbias_regulator: pbias_regulator { | ||
123 | compatible = "ti,pbias-omap3", "ti,pbias-omap"; | ||
124 | reg = <0x2b0 0x4>; | ||
125 | syscon = <&scm_conf>; | ||
126 | pbias_mmc_reg: pbias_mmc_omap2430 { | ||
127 | regulator-name = "pbias_mmc_omap2430"; | ||
128 | regulator-min-microvolt = <1800000>; | ||
129 | regulator-max-microvolt = <3000000>; | ||
130 | }; | ||
131 | }; | ||
120 | 132 | ||
121 | scm_clocks: clocks { | 133 | scm_clocks: clocks { |
122 | #address-cells = <1>; | 134 | #address-cells = <1>; |
@@ -202,17 +214,6 @@ | |||
202 | dma-requests = <96>; | 214 | dma-requests = <96>; |
203 | }; | 215 | }; |
204 | 216 | ||
205 | pbias_regulator: pbias_regulator { | ||
206 | compatible = "ti,pbias-omap"; | ||
207 | reg = <0x2b0 0x4>; | ||
208 | syscon = <&scm_conf>; | ||
209 | pbias_mmc_reg: pbias_mmc_omap2430 { | ||
210 | regulator-name = "pbias_mmc_omap2430"; | ||
211 | regulator-min-microvolt = <1800000>; | ||
212 | regulator-max-microvolt = <3000000>; | ||
213 | }; | ||
214 | }; | ||
215 | |||
216 | gpio1: gpio@48310000 { | 217 | gpio1: gpio@48310000 { |
217 | compatible = "ti,omap3-gpio"; | 218 | compatible = "ti,omap3-gpio"; |
218 | reg = <0x48310000 0x200>; | 219 | reg = <0x48310000 0x200>; |
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index abc4473e6f8a..5a206c100ce2 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi | |||
@@ -196,9 +196,10 @@ | |||
196 | reg = <0x5a0 0x170>; | 196 | reg = <0x5a0 0x170>; |
197 | #address-cells = <1>; | 197 | #address-cells = <1>; |
198 | #size-cells = <1>; | 198 | #size-cells = <1>; |
199 | ranges = <0 0x5a0 0x170>; | ||
199 | 200 | ||
200 | pbias_regulator: pbias_regulator { | 201 | pbias_regulator: pbias_regulator { |
201 | compatible = "ti,pbias-omap"; | 202 | compatible = "ti,pbias-omap4", "ti,pbias-omap"; |
202 | reg = <0x60 0x4>; | 203 | reg = <0x60 0x4>; |
203 | syscon = <&omap4_padconf_global>; | 204 | syscon = <&omap4_padconf_global>; |
204 | pbias_mmc_reg: pbias_mmc_omap4 { | 205 | pbias_mmc_reg: pbias_mmc_omap4 { |
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts index 3cc8f357d5b8..3cb030f9d2c4 100644 --- a/arch/arm/boot/dts/omap5-uevm.dts +++ b/arch/arm/boot/dts/omap5-uevm.dts | |||
@@ -174,8 +174,8 @@ | |||
174 | 174 | ||
175 | i2c5_pins: pinmux_i2c5_pins { | 175 | i2c5_pins: pinmux_i2c5_pins { |
176 | pinctrl-single,pins = < | 176 | pinctrl-single,pins = < |
177 | 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */ | 177 | 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */ |
178 | 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */ | 178 | 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */ |
179 | >; | 179 | >; |
180 | }; | 180 | }; |
181 | 181 | ||
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index 4205a8ac9ddb..4c04389dab32 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi | |||
@@ -185,9 +185,10 @@ | |||
185 | reg = <0x5a0 0xec>; | 185 | reg = <0x5a0 0xec>; |
186 | #address-cells = <1>; | 186 | #address-cells = <1>; |
187 | #size-cells = <1>; | 187 | #size-cells = <1>; |
188 | ranges = <0 0x5a0 0xec>; | ||
188 | 189 | ||
189 | pbias_regulator: pbias_regulator { | 190 | pbias_regulator: pbias_regulator { |
190 | compatible = "ti,pbias-omap"; | 191 | compatible = "ti,pbias-omap5", "ti,pbias-omap"; |
191 | reg = <0x60 0x4>; | 192 | reg = <0x60 0x4>; |
192 | syscon = <&omap5_padconf_global>; | 193 | syscon = <&omap5_padconf_global>; |
193 | pbias_mmc_reg: pbias_mmc_omap5 { | 194 | pbias_mmc_reg: pbias_mmc_omap5 { |
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index a0b2a79cbfbd..4624d0f2a754 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi | |||
@@ -1627,6 +1627,7 @@ | |||
1627 | "mix.0", "mix.1", | 1627 | "mix.0", "mix.1", |
1628 | "dvc.0", "dvc.1", | 1628 | "dvc.0", "dvc.1", |
1629 | "clk_a", "clk_b", "clk_c", "clk_i"; | 1629 | "clk_a", "clk_b", "clk_c", "clk_i"; |
1630 | power-domains = <&cpg_clocks>; | ||
1630 | 1631 | ||
1631 | status = "disabled"; | 1632 | status = "disabled"; |
1632 | 1633 | ||
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index 831525dd39a6..1666c8a6b143 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi | |||
@@ -1677,6 +1677,7 @@ | |||
1677 | "mix.0", "mix.1", | 1677 | "mix.0", "mix.1", |
1678 | "dvc.0", "dvc.1", | 1678 | "dvc.0", "dvc.1", |
1679 | "clk_a", "clk_b", "clk_c", "clk_i"; | 1679 | "clk_a", "clk_b", "clk_c", "clk_i"; |
1680 | power-domains = <&cpg_clocks>; | ||
1680 | 1681 | ||
1681 | status = "disabled"; | 1682 | status = "disabled"; |
1682 | 1683 | ||
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi index 2fa7a0dc83f7..275c78ccc0f3 100644 --- a/arch/arm/boot/dts/rk3288-veyron.dtsi +++ b/arch/arm/boot/dts/rk3288-veyron.dtsi | |||
@@ -158,6 +158,7 @@ | |||
158 | }; | 158 | }; |
159 | 159 | ||
160 | &hdmi { | 160 | &hdmi { |
161 | ddc-i2c-bus = <&i2c5>; | ||
161 | status = "okay"; | 162 | status = "okay"; |
162 | }; | 163 | }; |
163 | 164 | ||
diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi index 3efa3b2ebe90..6b914e4bb099 100644 --- a/arch/arm/boot/dts/stih407.dtsi +++ b/arch/arm/boot/dts/stih407.dtsi | |||
@@ -103,48 +103,46 @@ | |||
103 | <&clk_s_d0_quadfs 0>, | 103 | <&clk_s_d0_quadfs 0>, |
104 | <&clk_s_d2_quadfs 0>, | 104 | <&clk_s_d2_quadfs 0>, |
105 | <&clk_s_d2_quadfs 0>; | 105 | <&clk_s_d2_quadfs 0>; |
106 | ranges; | 106 | }; |
107 | 107 | ||
108 | sti-hdmi@8d04000 { | 108 | sti-hdmi@8d04000 { |
109 | compatible = "st,stih407-hdmi"; | 109 | compatible = "st,stih407-hdmi"; |
110 | reg = <0x8d04000 0x1000>; | 110 | reg = <0x8d04000 0x1000>; |
111 | reg-names = "hdmi-reg"; | 111 | reg-names = "hdmi-reg"; |
112 | interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>; | 112 | interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>; |
113 | interrupt-names = "irq"; | 113 | interrupt-names = "irq"; |
114 | clock-names = "pix", | 114 | clock-names = "pix", |
115 | "tmds", | 115 | "tmds", |
116 | "phy", | 116 | "phy", |
117 | "audio", | 117 | "audio", |
118 | "main_parent", | 118 | "main_parent", |
119 | "aux_parent"; | 119 | "aux_parent"; |
120 | 120 | ||
121 | clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, | 121 | clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, |
122 | <&clk_s_d2_flexgen CLK_TMDS_HDMI>, | 122 | <&clk_s_d2_flexgen CLK_TMDS_HDMI>, |
123 | <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, | 123 | <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, |
124 | <&clk_s_d0_flexgen CLK_PCM_0>, | 124 | <&clk_s_d0_flexgen CLK_PCM_0>, |
125 | <&clk_s_d2_quadfs 0>, | 125 | <&clk_s_d2_quadfs 0>, |
126 | <&clk_s_d2_quadfs 1>; | 126 | <&clk_s_d2_quadfs 1>; |
127 | 127 | ||
128 | hdmi,hpd-gpio = <&pio5 3>; | 128 | hdmi,hpd-gpio = <&pio5 3>; |
129 | reset-names = "hdmi"; | 129 | reset-names = "hdmi"; |
130 | resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; | 130 | resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; |
131 | ddc = <&hdmiddc>; | 131 | ddc = <&hdmiddc>; |
132 | 132 | }; | |
133 | }; | 133 | |
134 | 134 | sti-hda@8d02000 { | |
135 | sti-hda@8d02000 { | 135 | compatible = "st,stih407-hda"; |
136 | compatible = "st,stih407-hda"; | 136 | reg = <0x8d02000 0x400>, <0x92b0120 0x4>; |
137 | reg = <0x8d02000 0x400>, <0x92b0120 0x4>; | 137 | reg-names = "hda-reg", "video-dacs-ctrl"; |
138 | reg-names = "hda-reg", "video-dacs-ctrl"; | 138 | clock-names = "pix", |
139 | clock-names = "pix", | 139 | "hddac", |
140 | "hddac", | 140 | "main_parent", |
141 | "main_parent", | 141 | "aux_parent"; |
142 | "aux_parent"; | 142 | clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, |
143 | clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, | 143 | <&clk_s_d2_flexgen CLK_HDDAC>, |
144 | <&clk_s_d2_flexgen CLK_HDDAC>, | 144 | <&clk_s_d2_quadfs 0>, |
145 | <&clk_s_d2_quadfs 0>, | 145 | <&clk_s_d2_quadfs 1>; |
146 | <&clk_s_d2_quadfs 1>; | ||
147 | }; | ||
148 | }; | 146 | }; |
149 | }; | 147 | }; |
150 | }; | 148 | }; |
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi index 6f40bc99c22f..8c6e61a27234 100644 --- a/arch/arm/boot/dts/stih410.dtsi +++ b/arch/arm/boot/dts/stih410.dtsi | |||
@@ -178,48 +178,46 @@ | |||
178 | <&clk_s_d0_quadfs 0>, | 178 | <&clk_s_d0_quadfs 0>, |
179 | <&clk_s_d2_quadfs 0>, | 179 | <&clk_s_d2_quadfs 0>, |
180 | <&clk_s_d2_quadfs 0>; | 180 | <&clk_s_d2_quadfs 0>; |
181 | ranges; | 181 | }; |
182 | 182 | ||
183 | sti-hdmi@8d04000 { | 183 | sti-hdmi@8d04000 { |
184 | compatible = "st,stih407-hdmi"; | 184 | compatible = "st,stih407-hdmi"; |
185 | reg = <0x8d04000 0x1000>; | 185 | reg = <0x8d04000 0x1000>; |
186 | reg-names = "hdmi-reg"; | 186 | reg-names = "hdmi-reg"; |
187 | interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>; | 187 | interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>; |
188 | interrupt-names = "irq"; | 188 | interrupt-names = "irq"; |
189 | clock-names = "pix", | 189 | clock-names = "pix", |
190 | "tmds", | 190 | "tmds", |
191 | "phy", | 191 | "phy", |
192 | "audio", | 192 | "audio", |
193 | "main_parent", | 193 | "main_parent", |
194 | "aux_parent"; | 194 | "aux_parent"; |
195 | 195 | ||
196 | clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, | 196 | clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, |
197 | <&clk_s_d2_flexgen CLK_TMDS_HDMI>, | 197 | <&clk_s_d2_flexgen CLK_TMDS_HDMI>, |
198 | <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, | 198 | <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, |
199 | <&clk_s_d0_flexgen CLK_PCM_0>, | 199 | <&clk_s_d0_flexgen CLK_PCM_0>, |
200 | <&clk_s_d2_quadfs 0>, | 200 | <&clk_s_d2_quadfs 0>, |
201 | <&clk_s_d2_quadfs 1>; | 201 | <&clk_s_d2_quadfs 1>; |
202 | 202 | ||
203 | hdmi,hpd-gpio = <&pio5 3>; | 203 | hdmi,hpd-gpio = <&pio5 3>; |
204 | reset-names = "hdmi"; | 204 | reset-names = "hdmi"; |
205 | resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; | 205 | resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; |
206 | ddc = <&hdmiddc>; | 206 | ddc = <&hdmiddc>; |
207 | 207 | }; | |
208 | }; | 208 | |
209 | 209 | sti-hda@8d02000 { | |
210 | sti-hda@8d02000 { | 210 | compatible = "st,stih407-hda"; |
211 | compatible = "st,stih407-hda"; | 211 | reg = <0x8d02000 0x400>, <0x92b0120 0x4>; |
212 | reg = <0x8d02000 0x400>, <0x92b0120 0x4>; | 212 | reg-names = "hda-reg", "video-dacs-ctrl"; |
213 | reg-names = "hda-reg", "video-dacs-ctrl"; | 213 | clock-names = "pix", |
214 | clock-names = "pix", | 214 | "hddac", |
215 | "hddac", | 215 | "main_parent", |
216 | "main_parent", | 216 | "aux_parent"; |
217 | "aux_parent"; | 217 | clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, |
218 | clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, | 218 | <&clk_s_d2_flexgen CLK_HDDAC>, |
219 | <&clk_s_d2_flexgen CLK_HDDAC>, | 219 | <&clk_s_d2_quadfs 0>, |
220 | <&clk_s_d2_quadfs 0>, | 220 | <&clk_s_d2_quadfs 1>; |
221 | <&clk_s_d2_quadfs 1>; | ||
222 | }; | ||
223 | }; | 221 | }; |
224 | }; | 222 | }; |
225 | 223 | ||
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 2bebaa286f9a..391230c3dc93 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi | |||
@@ -107,7 +107,7 @@ | |||
107 | 720000 1200000 | 107 | 720000 1200000 |
108 | 528000 1100000 | 108 | 528000 1100000 |
109 | 312000 1000000 | 109 | 312000 1000000 |
110 | 144000 900000 | 110 | 144000 1000000 |
111 | >; | 111 | >; |
112 | #cooling-cells = <2>; | 112 | #cooling-cells = <2>; |
113 | cooling-min-level = <0>; | 113 | cooling-min-level = <0>; |
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 96dabcb6c621..996aed3b4eee 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c | |||
@@ -95,7 +95,7 @@ void it8152_init_irq(void) | |||
95 | } | 95 | } |
96 | } | 96 | } |
97 | 97 | ||
98 | void it8152_irq_demux(unsigned int irq, struct irq_desc *desc) | 98 | void it8152_irq_demux(struct irq_desc *desc) |
99 | { | 99 | { |
100 | int bits_pd, bits_lp, bits_ld; | 100 | int bits_pd, bits_lp, bits_ld; |
101 | int i; | 101 | int i; |
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c index 304adea4bc52..0e97b4b871f9 100644 --- a/arch/arm/common/locomo.c +++ b/arch/arm/common/locomo.c | |||
@@ -138,7 +138,7 @@ static struct locomo_dev_info locomo_devices[] = { | |||
138 | }, | 138 | }, |
139 | }; | 139 | }; |
140 | 140 | ||
141 | static void locomo_handler(unsigned int __irq, struct irq_desc *desc) | 141 | static void locomo_handler(struct irq_desc *desc) |
142 | { | 142 | { |
143 | struct locomo *lchip = irq_desc_get_chip_data(desc); | 143 | struct locomo *lchip = irq_desc_get_chip_data(desc); |
144 | int req, i; | 144 | int req, i; |
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 4f290250fa93..3d224941b541 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c | |||
@@ -196,10 +196,8 @@ static struct sa1111_dev_info sa1111_devices[] = { | |||
196 | * active IRQs causes the interrupt output to pulse, the upper levels | 196 | * active IRQs causes the interrupt output to pulse, the upper levels |
197 | * will call us again if there are more interrupts to process. | 197 | * will call us again if there are more interrupts to process. |
198 | */ | 198 | */ |
199 | static void | 199 | static void sa1111_irq_handler(struct irq_desc *desc) |
200 | sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc) | ||
201 | { | 200 | { |
202 | unsigned int irq = irq_desc_get_irq(desc); | ||
203 | unsigned int stat0, stat1, i; | 201 | unsigned int stat0, stat1, i; |
204 | struct sa1111 *sachip = irq_desc_get_handler_data(desc); | 202 | struct sa1111 *sachip = irq_desc_get_handler_data(desc); |
205 | void __iomem *mapbase = sachip->base + SA1111_INTC; | 203 | void __iomem *mapbase = sachip->base + SA1111_INTC; |
@@ -214,7 +212,7 @@ sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc) | |||
214 | sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1); | 212 | sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1); |
215 | 213 | ||
216 | if (stat0 == 0 && stat1 == 0) { | 214 | if (stat0 == 0 && stat1 == 0) { |
217 | do_bad_IRQ(irq, desc); | 215 | do_bad_IRQ(desc); |
218 | return; | 216 | return; |
219 | } | 217 | } |
220 | 218 | ||
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 50c84e1876fc..3f15a5cae167 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig | |||
@@ -240,7 +240,8 @@ CONFIG_SSI_PROTOCOL=m | |||
240 | CONFIG_PINCTRL_SINGLE=y | 240 | CONFIG_PINCTRL_SINGLE=y |
241 | CONFIG_DEBUG_GPIO=y | 241 | CONFIG_DEBUG_GPIO=y |
242 | CONFIG_GPIO_SYSFS=y | 242 | CONFIG_GPIO_SYSFS=y |
243 | CONFIG_GPIO_PCF857X=m | 243 | CONFIG_GPIO_PCA953X=m |
244 | CONFIG_GPIO_PCF857X=y | ||
244 | CONFIG_GPIO_TWL4030=y | 245 | CONFIG_GPIO_TWL4030=y |
245 | CONFIG_GPIO_PALMAS=y | 246 | CONFIG_GPIO_PALMAS=y |
246 | CONFIG_W1=m | 247 | CONFIG_W1=m |
@@ -350,6 +351,8 @@ CONFIG_USB_MUSB_HDRC=m | |||
350 | CONFIG_USB_MUSB_OMAP2PLUS=m | 351 | CONFIG_USB_MUSB_OMAP2PLUS=m |
351 | CONFIG_USB_MUSB_AM35X=m | 352 | CONFIG_USB_MUSB_AM35X=m |
352 | CONFIG_USB_MUSB_DSPS=m | 353 | CONFIG_USB_MUSB_DSPS=m |
354 | CONFIG_USB_INVENTRA_DMA=y | ||
355 | CONFIG_USB_TI_CPPI41_DMA=y | ||
353 | CONFIG_USB_DWC3=m | 356 | CONFIG_USB_DWC3=m |
354 | CONFIG_USB_TEST=m | 357 | CONFIG_USB_TEST=m |
355 | CONFIG_AM335X_PHY_USB=y | 358 | CONFIG_AM335X_PHY_USB=y |
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 7bbf325a4f31..b2bc8e11471d 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h | |||
@@ -491,11 +491,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) | |||
491 | #endif | 491 | #endif |
492 | .endm | 492 | .endm |
493 | 493 | ||
494 | .macro uaccess_save_and_disable, tmp | ||
495 | uaccess_save \tmp | ||
496 | uaccess_disable \tmp | ||
497 | .endm | ||
498 | |||
499 | .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo | 494 | .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo |
500 | .macro ret\c, reg | 495 | .macro ret\c, reg |
501 | #if __LINUX_ARM_ARCH__ < 6 | 496 | #if __LINUX_ARM_ARCH__ < 6 |
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index b274bde24905..e7335a92144e 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h | |||
@@ -40,6 +40,7 @@ do { \ | |||
40 | "2:\t.asciz " #__file "\n" \ | 40 | "2:\t.asciz " #__file "\n" \ |
41 | ".popsection\n" \ | 41 | ".popsection\n" \ |
42 | ".pushsection __bug_table,\"a\"\n" \ | 42 | ".pushsection __bug_table,\"a\"\n" \ |
43 | ".align 2\n" \ | ||
43 | "3:\t.word 1b, 2b\n" \ | 44 | "3:\t.word 1b, 2b\n" \ |
44 | "\t.hword " #__line ", 0\n" \ | 45 | "\t.hword " #__line ", 0\n" \ |
45 | ".popsection"); \ | 46 | ".popsection"); \ |
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index e878129f2fee..fc8ba1663601 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #ifndef __ASSEMBLY__ | 13 | #ifndef __ASSEMBLY__ |
14 | #include <asm/barrier.h> | 14 | #include <asm/barrier.h> |
15 | #include <asm/thread_info.h> | ||
15 | #endif | 16 | #endif |
16 | 17 | ||
17 | /* | 18 | /* |
@@ -89,7 +90,8 @@ static inline unsigned int get_domain(void) | |||
89 | 90 | ||
90 | asm( | 91 | asm( |
91 | "mrc p15, 0, %0, c3, c0 @ get domain" | 92 | "mrc p15, 0, %0, c3, c0 @ get domain" |
92 | : "=r" (domain)); | 93 | : "=r" (domain) |
94 | : "m" (current_thread_info()->cpu_domain)); | ||
93 | 95 | ||
94 | return domain; | 96 | return domain; |
95 | } | 97 | } |
@@ -98,7 +100,7 @@ static inline void set_domain(unsigned val) | |||
98 | { | 100 | { |
99 | asm volatile( | 101 | asm volatile( |
100 | "mcr p15, 0, %0, c3, c0 @ set domain" | 102 | "mcr p15, 0, %0, c3, c0 @ set domain" |
101 | : : "r" (val)); | 103 | : : "r" (val) : "memory"); |
102 | isb(); | 104 | isb(); |
103 | } | 105 | } |
104 | 106 | ||
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h index d36a73d7c0e8..076777ff3daa 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h | |||
@@ -106,7 +106,7 @@ extern void __iomem *it8152_base_address; | |||
106 | struct pci_dev; | 106 | struct pci_dev; |
107 | struct pci_sys_data; | 107 | struct pci_sys_data; |
108 | 108 | ||
109 | extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc); | 109 | extern void it8152_irq_demux(struct irq_desc *desc); |
110 | extern void it8152_init_irq(void); | 110 | extern void it8152_init_irq(void); |
111 | extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); | 111 | extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); |
112 | extern int it8152_pci_setup(int nr, struct pci_sys_data *sys); | 112 | extern int it8152_pci_setup(int nr, struct pci_sys_data *sys); |
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h index af79da40af2a..9beb92914f4d 100644 --- a/arch/arm/include/asm/hw_irq.h +++ b/arch/arm/include/asm/hw_irq.h | |||
@@ -11,12 +11,6 @@ static inline void ack_bad_irq(int irq) | |||
11 | pr_crit("unexpected IRQ trap at vector %02x\n", irq); | 11 | pr_crit("unexpected IRQ trap at vector %02x\n", irq); |
12 | } | 12 | } |
13 | 13 | ||
14 | void set_irq_flags(unsigned int irq, unsigned int flags); | ||
15 | |||
16 | #define IRQF_VALID (1 << 0) | ||
17 | #define IRQF_PROBE (1 << 1) | ||
18 | #define IRQF_NOAUTOEN (1 << 2) | ||
19 | |||
20 | #define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) | 14 | #define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) |
21 | 15 | ||
22 | #endif | 16 | #endif |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index dcba0fa5176e..c4072d9f32c7 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -29,21 +29,18 @@ | |||
29 | 29 | ||
30 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED | 30 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED |
31 | 31 | ||
32 | #if defined(CONFIG_KVM_ARM_MAX_VCPUS) | ||
33 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS | ||
34 | #else | ||
35 | #define KVM_MAX_VCPUS 0 | ||
36 | #endif | ||
37 | |||
38 | #define KVM_USER_MEM_SLOTS 32 | 32 | #define KVM_USER_MEM_SLOTS 32 |
39 | #define KVM_PRIVATE_MEM_SLOTS 4 | 33 | #define KVM_PRIVATE_MEM_SLOTS 4 |
40 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 34 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
41 | #define KVM_HAVE_ONE_REG | 35 | #define KVM_HAVE_ONE_REG |
36 | #define KVM_HALT_POLL_NS_DEFAULT 500000 | ||
42 | 37 | ||
43 | #define KVM_VCPU_MAX_FEATURES 2 | 38 | #define KVM_VCPU_MAX_FEATURES 2 |
44 | 39 | ||
45 | #include <kvm/arm_vgic.h> | 40 | #include <kvm/arm_vgic.h> |
46 | 41 | ||
42 | #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS | ||
43 | |||
47 | u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); | 44 | u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); |
48 | int __attribute_const__ kvm_target_cpu(void); | 45 | int __attribute_const__ kvm_target_cpu(void); |
49 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); | 46 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); |
@@ -148,6 +145,7 @@ struct kvm_vm_stat { | |||
148 | 145 | ||
149 | struct kvm_vcpu_stat { | 146 | struct kvm_vcpu_stat { |
150 | u32 halt_successful_poll; | 147 | u32 halt_successful_poll; |
148 | u32 halt_attempted_poll; | ||
151 | u32 halt_wakeup; | 149 | u32 halt_wakeup; |
152 | }; | 150 | }; |
153 | 151 | ||
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index 2092ee1e1300..de4634b51456 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h | |||
@@ -23,10 +23,10 @@ extern int show_fiq_list(struct seq_file *, int); | |||
23 | /* | 23 | /* |
24 | * This is for easy migration, but should be changed in the source | 24 | * This is for easy migration, but should be changed in the source |
25 | */ | 25 | */ |
26 | #define do_bad_IRQ(irq,desc) \ | 26 | #define do_bad_IRQ(desc) \ |
27 | do { \ | 27 | do { \ |
28 | raw_spin_lock(&desc->lock); \ | 28 | raw_spin_lock(&desc->lock); \ |
29 | handle_bad_irq(irq, desc); \ | 29 | handle_bad_irq(desc); \ |
30 | raw_spin_unlock(&desc->lock); \ | 30 | raw_spin_unlock(&desc->lock); \ |
31 | } while(0) | 31 | } while(0) |
32 | 32 | ||
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index d0a1119dcaf3..776757d1604a 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h | |||
@@ -25,7 +25,6 @@ | |||
25 | struct task_struct; | 25 | struct task_struct; |
26 | 26 | ||
27 | #include <asm/types.h> | 27 | #include <asm/types.h> |
28 | #include <asm/domain.h> | ||
29 | 28 | ||
30 | typedef unsigned long mm_segment_t; | 29 | typedef unsigned long mm_segment_t; |
31 | 30 | ||
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 32640c431a08..7cba573c2cc9 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h | |||
@@ -19,7 +19,7 @@ | |||
19 | * This may need to be greater than __NR_last_syscall+1 in order to | 19 | * This may need to be greater than __NR_last_syscall+1 in order to |
20 | * account for the padding in the syscall table | 20 | * account for the padding in the syscall table |
21 | */ | 21 | */ |
22 | #define __NR_syscalls (388) | 22 | #define __NR_syscalls (392) |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * *NOTE*: This is a ghost syscall private to the kernel. Only the | 25 | * *NOTE*: This is a ghost syscall private to the kernel. Only the |
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 0c3f5a0dafd3..7a2a32a1d5a8 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h | |||
@@ -414,6 +414,8 @@ | |||
414 | #define __NR_memfd_create (__NR_SYSCALL_BASE+385) | 414 | #define __NR_memfd_create (__NR_SYSCALL_BASE+385) |
415 | #define __NR_bpf (__NR_SYSCALL_BASE+386) | 415 | #define __NR_bpf (__NR_SYSCALL_BASE+386) |
416 | #define __NR_execveat (__NR_SYSCALL_BASE+387) | 416 | #define __NR_execveat (__NR_SYSCALL_BASE+387) |
417 | #define __NR_userfaultfd (__NR_SYSCALL_BASE+388) | ||
418 | #define __NR_membarrier (__NR_SYSCALL_BASE+389) | ||
417 | 419 | ||
418 | /* | 420 | /* |
419 | * The following SWIs are ARM private. | 421 | * The following SWIs are ARM private. |
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 05745eb838c5..fde6c88d560c 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S | |||
@@ -397,6 +397,8 @@ | |||
397 | /* 385 */ CALL(sys_memfd_create) | 397 | /* 385 */ CALL(sys_memfd_create) |
398 | CALL(sys_bpf) | 398 | CALL(sys_bpf) |
399 | CALL(sys_execveat) | 399 | CALL(sys_execveat) |
400 | CALL(sys_userfaultfd) | ||
401 | CALL(sys_membarrier) | ||
400 | #ifndef syscalls_counted | 402 | #ifndef syscalls_counted |
401 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls | 403 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls |
402 | #define syscalls_counted | 404 | #define syscalls_counted |
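Note: the bump of __NR_syscalls from 388 to 392 in the unistd.h hunk above follows from the two new entries plus the padding rule visible in calls.S. With __NR_membarrier at __NR_SYSCALL_BASE+389 the table holds 390 calls, and the table is padded to a multiple of four; the arithmetic is spelled out here for clarity.

	number of calls  = 389 + 1        = 390
	padded size      = (390 + 3) & ~3 = 392  ->  #define __NR_syscalls (392)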
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 5ff4826cb154..2766183e69df 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
@@ -79,26 +79,6 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs) | |||
79 | handle_IRQ(irq, regs); | 79 | handle_IRQ(irq, regs); |
80 | } | 80 | } |
81 | 81 | ||
82 | void set_irq_flags(unsigned int irq, unsigned int iflags) | ||
83 | { | ||
84 | unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; | ||
85 | |||
86 | if (irq >= nr_irqs) { | ||
87 | pr_err("Trying to set irq flags for IRQ%d\n", irq); | ||
88 | return; | ||
89 | } | ||
90 | |||
91 | if (iflags & IRQF_VALID) | ||
92 | clr |= IRQ_NOREQUEST; | ||
93 | if (iflags & IRQF_PROBE) | ||
94 | clr |= IRQ_NOPROBE; | ||
95 | if (!(iflags & IRQF_NOAUTOEN)) | ||
96 | clr |= IRQ_NOAUTOEN; | ||
97 | /* Order is clear bits in "clr" then set bits in "set" */ | ||
98 | irq_modify_status(irq, clr, set & ~clr); | ||
99 | } | ||
100 | EXPORT_SYMBOL_GPL(set_irq_flags); | ||
101 | |||
102 | void __init init_IRQ(void) | 82 | void __init init_IRQ(void) |
103 | { | 83 | { |
104 | int ret; | 84 | int ret; |
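Note: the deleted set_irq_flags() helper was a thin wrapper around irq_modify_status(). The two common invocations expand as shown below; this is derived only from the removed body above and sketches the equivalence, not how individual callers were converted.

	/* set_irq_flags(irq, IRQF_VALID) was equivalent to: */
	irq_modify_status(irq, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);

	/* set_irq_flags(irq, IRQF_VALID | IRQF_PROBE) was equivalent to: */
	irq_modify_status(irq, IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0);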
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index a6ad93c9bce3..fd9eefce0a7b 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c | |||
@@ -259,15 +259,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) | |||
259 | if (err) | 259 | if (err) |
260 | return err; | 260 | return err; |
261 | 261 | ||
262 | patch_text((void *)bpt->bpt_addr, | 262 | /* Machine is already stopped, so we can use __patch_text() directly */ |
263 | *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr); | 263 | __patch_text((void *)bpt->bpt_addr, |
264 | *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr); | ||
264 | 265 | ||
265 | return err; | 266 | return err; |
266 | } | 267 | } |
267 | 268 | ||
268 | int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) | 269 | int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) |
269 | { | 270 | { |
270 | patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr); | 271 | /* Machine is already stopped, so we can use __patch_text() directly */ |
272 | __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr); | ||
271 | 273 | ||
272 | return 0; | 274 | return 0; |
273 | } | 275 | } |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index a3089bacb8d8..7a7c4cea5523 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -226,6 +226,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, | |||
226 | 226 | ||
227 | memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); | 227 | memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); |
228 | 228 | ||
229 | #ifdef CONFIG_CPU_USE_DOMAINS | ||
229 | /* | 230 | /* |
230 | * Copy the initial value of the domain access control register | 231 | * Copy the initial value of the domain access control register |
231 | * from the current thread: thread->addr_limit will have been | 232 | * from the current thread: thread->addr_limit will have been |
@@ -233,6 +234,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, | |||
233 | * kernel/fork.c | 234 | * kernel/fork.c |
234 | */ | 235 | */ |
235 | thread->cpu_domain = get_domain(); | 236 | thread->cpu_domain = get_domain(); |
237 | #endif | ||
236 | 238 | ||
237 | if (likely(!(p->flags & PF_KTHREAD))) { | 239 | if (likely(!(p->flags & PF_KTHREAD))) { |
238 | *childregs = *current_pt_regs(); | 240 | *childregs = *current_pt_regs(); |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index b6cda06b455f..7b8f2141427b 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -343,15 +343,18 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |||
343 | */ | 343 | */ |
344 | thumb = handler & 1; | 344 | thumb = handler & 1; |
345 | 345 | ||
346 | #if __LINUX_ARM_ARCH__ >= 7 | ||
347 | /* | 346 | /* |
348 | * Clear the If-Then Thumb-2 execution state | 347 | * Clear the If-Then Thumb-2 execution state. ARM spec |
349 | * ARM spec requires this to be all 000s in ARM mode | 348 | * requires this to be all 000s in ARM mode. Snapdragon |
350 | * Snapdragon S4/Krait misbehaves on a Thumb=>ARM | 349 | * S4/Krait misbehaves on a Thumb=>ARM signal transition |
351 | * signal transition without this. | 350 | * without this. |
351 | * | ||
352 | * We must do this whenever we are running on a Thumb-2 | ||
353 | * capable CPU, which includes ARMv6T2. However, we elect | ||
354 | * to always do this to simplify the code; this field is | ||
355 | * marked UNK/SBZP for older architectures. | ||
352 | */ | 356 | */ |
353 | cpsr &= ~PSR_IT_MASK; | 357 | cpsr &= ~PSR_IT_MASK; |
354 | #endif | ||
355 | 358 | ||
356 | if (thumb) { | 359 | if (thumb) { |
357 | cpsr |= PSR_T_BIT; | 360 | cpsr |= PSR_T_BIT; |
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig index bfb915d05665..210eccadb69a 100644 --- a/arch/arm/kvm/Kconfig +++ b/arch/arm/kvm/Kconfig | |||
@@ -45,15 +45,4 @@ config KVM_ARM_HOST | |||
45 | ---help--- | 45 | ---help--- |
46 | Provides host support for ARM processors. | 46 | Provides host support for ARM processors. |
47 | 47 | ||
48 | config KVM_ARM_MAX_VCPUS | ||
49 | int "Number maximum supported virtual CPUs per VM" | ||
50 | depends on KVM_ARM_HOST | ||
51 | default 4 | ||
52 | help | ||
53 | Static number of max supported virtual CPUs per VM. | ||
54 | |||
55 | If you choose a high number, the vcpu structures will be quite | ||
56 | large, so only choose a reasonable number that you expect to | ||
57 | actually use. | ||
58 | |||
59 | endif # VIRTUALIZATION | 48 | endif # VIRTUALIZATION |
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index ce404a5c3062..dc017adfddc8 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
@@ -446,7 +446,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) | |||
446 | * Map the VGIC hardware resources before running a vcpu the first | 446 | * Map the VGIC hardware resources before running a vcpu the first |
447 | * time on this VM. | 447 | * time on this VM. |
448 | */ | 448 | */ |
449 | if (unlikely(!vgic_ready(kvm))) { | 449 | if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) { |
450 | ret = kvm_vgic_map_resources(kvm); | 450 | ret = kvm_vgic_map_resources(kvm); |
451 | if (ret) | 451 | if (ret) |
452 | return ret; | 452 | return ret; |
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 702740d37465..51a59504bef4 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S | |||
@@ -515,8 +515,7 @@ ARM_BE8(rev r6, r6 ) | |||
515 | 515 | ||
516 | mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL | 516 | mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL |
517 | str r2, [vcpu, #VCPU_TIMER_CNTV_CTL] | 517 | str r2, [vcpu, #VCPU_TIMER_CNTV_CTL] |
518 | bic r2, #1 @ Clear ENABLE | 518 | |
519 | mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL | ||
520 | isb | 519 | isb |
521 | 520 | ||
522 | mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL | 521 | mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL |
@@ -529,6 +528,9 @@ ARM_BE8(rev r6, r6 ) | |||
529 | mcrr p15, 4, r2, r2, c14 @ CNTVOFF | 528 | mcrr p15, 4, r2, r2, c14 @ CNTVOFF |
530 | 529 | ||
531 | 1: | 530 | 1: |
531 | mov r2, #0 @ Clear ENABLE | ||
532 | mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL | ||
533 | |||
532 | @ Allow physical timer/counter access for the host | 534 | @ Allow physical timer/counter access for the host |
533 | mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL | 535 | mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL |
534 | orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) | 536 | orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 7b4201294187..6984342da13d 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -1792,8 +1792,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, | |||
1792 | if (vma->vm_flags & VM_PFNMAP) { | 1792 | if (vma->vm_flags & VM_PFNMAP) { |
1793 | gpa_t gpa = mem->guest_phys_addr + | 1793 | gpa_t gpa = mem->guest_phys_addr + |
1794 | (vm_start - mem->userspace_addr); | 1794 | (vm_start - mem->userspace_addr); |
1795 | phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) + | 1795 | phys_addr_t pa; |
1796 | vm_start - vma->vm_start; | 1796 | |
1797 | pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; | ||
1798 | pa += vm_start - vma->vm_start; | ||
1797 | 1799 | ||
1798 | /* IO region dirty page logging not allowed */ | 1800 | /* IO region dirty page logging not allowed */ |
1799 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) | 1801 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) |
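
The mmu.c hunk splits the pa computation so vm_pgoff is widened to phys_addr_t before the shift. On an LPAE host phys_addr_t is 64 bits wide but vm_pgoff is a 32-bit unsigned long, so shifting first can silently drop the high bits of the physical address. A small userspace model of the truncation (PAGE_SHIFT and the example pgoff are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		/* vm_pgoff is a 32-bit unsigned long on an ARM LPAE kernel;
		 * modelled here as unsigned int. This pgoff maps above 4 GiB. */
		unsigned int vm_pgoff = 0x120000;
		uint64_t wrong, right;			/* stand-ins for phys_addr_t */

		wrong = vm_pgoff << PAGE_SHIFT;		  /* shift happens in 32 bits and wraps */
		right = (uint64_t)vm_pgoff << PAGE_SHIFT; /* widen first, as the patch does     */

		printf("shift-then-widen: 0x%llx\n", (unsigned long long)wrong);
		printf("widen-then-shift: 0x%llx\n", (unsigned long long)right);
		return 0;
	}
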
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c index 4b94b513168d..ad6f6424f1d1 100644 --- a/arch/arm/kvm/psci.c +++ b/arch/arm/kvm/psci.c | |||
@@ -126,7 +126,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
126 | 126 | ||
127 | static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) | 127 | static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) |
128 | { | 128 | { |
129 | int i; | 129 | int i, matching_cpus = 0; |
130 | unsigned long mpidr; | 130 | unsigned long mpidr; |
131 | unsigned long target_affinity; | 131 | unsigned long target_affinity; |
132 | unsigned long target_affinity_mask; | 132 | unsigned long target_affinity_mask; |
@@ -151,12 +151,16 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) | |||
151 | */ | 151 | */ |
152 | kvm_for_each_vcpu(i, tmp, kvm) { | 152 | kvm_for_each_vcpu(i, tmp, kvm) { |
153 | mpidr = kvm_vcpu_get_mpidr_aff(tmp); | 153 | mpidr = kvm_vcpu_get_mpidr_aff(tmp); |
154 | if (((mpidr & target_affinity_mask) == target_affinity) && | 154 | if ((mpidr & target_affinity_mask) == target_affinity) { |
155 | !tmp->arch.pause) { | 155 | matching_cpus++; |
156 | return PSCI_0_2_AFFINITY_LEVEL_ON; | 156 | if (!tmp->arch.pause) |
157 | return PSCI_0_2_AFFINITY_LEVEL_ON; | ||
157 | } | 158 | } |
158 | } | 159 | } |
159 | 160 | ||
161 | if (!matching_cpus) | ||
162 | return PSCI_RET_INVALID_PARAMS; | ||
163 | |||
160 | return PSCI_0_2_AFFINITY_LEVEL_OFF; | 164 | return PSCI_0_2_AFFINITY_LEVEL_OFF; |
161 | } | 165 | } |
162 | 166 | ||
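
The reworked loop above now distinguishes "every matching vcpu is paused" from "no vcpu has that affinity at all", returning an invalid-parameters error in the latter case instead of reporting the target as OFF. A standalone mock of the control flow (the return values are stand-ins, not the real PSCI constants):

	#include <stdio.h>

	#define AFF_ON		0	/* stand-ins for the PSCI return codes */
	#define AFF_OFF		1
	#define INVALID_PARAMS	-2

	struct vcpu { unsigned long mpidr; int paused; };

	/* Mock of the reworked kvm_psci_vcpu_affinity_info() loop: count vcpus
	 * whose MPIDR matches the requested affinity, and only report OFF if at
	 * least one such vcpu actually exists. */
	static int affinity_info(const struct vcpu *v, int n,
				 unsigned long target, unsigned long mask)
	{
		int i, matching_cpus = 0;

		for (i = 0; i < n; i++) {
			if ((v[i].mpidr & mask) == target) {
				matching_cpus++;
				if (!v[i].paused)
					return AFF_ON;
			}
		}

		return matching_cpus ? AFF_OFF : INVALID_PARAMS;
	}

	int main(void)
	{
		const struct vcpu vcpus[] = { { 0x0, 1 }, { 0x1, 1 } };

		printf("%d\n", affinity_info(vcpus, 2, 0x1, 0xff));	/* 1: exists but paused */
		printf("%d\n", affinity_info(vcpus, 2, 0x7, 0xff));	/* -2: no such vcpu     */
		return 0;
	}
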
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index 305d7c6242bb..bfb3703357c5 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c | |||
@@ -69,14 +69,14 @@ static struct irq_chip pmu_irq_chip = { | |||
69 | .irq_ack = pmu_irq_ack, | 69 | .irq_ack = pmu_irq_ack, |
70 | }; | 70 | }; |
71 | 71 | ||
72 | static void pmu_irq_handler(unsigned int __irq, struct irq_desc *desc) | 72 | static void pmu_irq_handler(struct irq_desc *desc) |
73 | { | 73 | { |
74 | unsigned int irq = irq_desc_get_irq(desc); | ||
75 | unsigned long cause = readl(PMU_INTERRUPT_CAUSE); | 74 | unsigned long cause = readl(PMU_INTERRUPT_CAUSE); |
75 | unsigned int irq; | ||
76 | 76 | ||
77 | cause &= readl(PMU_INTERRUPT_MASK); | 77 | cause &= readl(PMU_INTERRUPT_MASK); |
78 | if (cause == 0) { | 78 | if (cause == 0) { |
79 | do_bad_IRQ(irq, desc); | 79 | do_bad_IRQ(desc); |
80 | return; | 80 | return; |
81 | } | 81 | } |
82 | 82 | ||
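
This is the first of many hunks in this section that follow the same tree-wide conversion: chained IRQ flow handlers lose their unsigned int irq argument and take only the irq_desc, recovering the number with irq_desc_get_irq() when it is still needed. A sketch of the pattern the converted handlers below all share (kernel context assumed; helper names from the 4.3-era irqdesc API):

	/* Sketch only: the shape that each converted demux handler takes. */
	static void foo_irq_handler(struct irq_desc *desc)	/* was (unsigned int irq, struct irq_desc *desc) */
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		unsigned int irq = irq_desc_get_irq(desc);	/* only if the parent number is still used */

		chained_irq_enter(chip, desc);
		/* ... read the cause register and generic_handle_irq() each child ... */
		chained_irq_exit(chip, desc);
	}
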
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c index 9bdf54795f05..56978199c479 100644 --- a/arch/arm/mach-exynos/mcpm-exynos.c +++ b/arch/arm/mach-exynos/mcpm-exynos.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/cputype.h> | 20 | #include <asm/cputype.h> |
21 | #include <asm/cp15.h> | 21 | #include <asm/cp15.h> |
22 | #include <asm/mcpm.h> | 22 | #include <asm/mcpm.h> |
23 | #include <asm/smp_plat.h> | ||
23 | 24 | ||
24 | #include "regs-pmu.h" | 25 | #include "regs-pmu.h" |
25 | #include "common.h" | 26 | #include "common.h" |
@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) | |||
70 | cluster >= EXYNOS5420_NR_CLUSTERS) | 71 | cluster >= EXYNOS5420_NR_CLUSTERS) |
71 | return -EINVAL; | 72 | return -EINVAL; |
72 | 73 | ||
73 | exynos_cpu_power_up(cpunr); | 74 | if (!exynos_cpu_power_state(cpunr)) { |
75 | exynos_cpu_power_up(cpunr); | ||
76 | |||
77 | /* | ||
78 | * This assumes the cluster number of the big cores(Cortex A15) | ||
79 | * is 0 and the Little cores(Cortex A7) is 1. | ||
80 | * When the system was booted from the Little core, | ||
81 | * they should be reset during power up cpu. | ||
82 | */ | ||
83 | if (cluster && | ||
84 | cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) { | ||
85 | /* | ||
86 | * Before we reset the Little cores, we should wait | ||
87 | * the SPARE2 register is set to 1 because the init | ||
88 | * codes of the iROM will set the register after | ||
89 | * initialization. | ||
90 | */ | ||
91 | while (!pmu_raw_readl(S5P_PMU_SPARE2)) | ||
92 | udelay(10); | ||
93 | |||
94 | pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu), | ||
95 | EXYNOS_SWRESET); | ||
96 | } | ||
97 | } | ||
98 | |||
74 | return 0; | 99 | return 0; |
75 | } | 100 | } |
76 | 101 | ||
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h index b7614333d296..fba9068ed260 100644 --- a/arch/arm/mach-exynos/regs-pmu.h +++ b/arch/arm/mach-exynos/regs-pmu.h | |||
@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr) | |||
513 | #define SPREAD_ENABLE 0xF | 513 | #define SPREAD_ENABLE 0xF |
514 | #define SPREAD_USE_STANDWFI 0xF | 514 | #define SPREAD_USE_STANDWFI 0xF |
515 | 515 | ||
516 | #define EXYNOS5420_KFC_CORE_RESET0 BIT(8) | ||
517 | #define EXYNOS5420_KFC_ETM_RESET0 BIT(20) | ||
518 | |||
519 | #define EXYNOS5420_KFC_CORE_RESET(_nr) \ | ||
520 | ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr)) | ||
521 | |||
516 | #define EXYNOS5420_BB_CON1 0x0784 | 522 | #define EXYNOS5420_BB_CON1 0x0784 |
517 | #define EXYNOS5420_BB_SEL_EN BIT(31) | 523 | #define EXYNOS5420_BB_SEL_EN BIT(31) |
518 | #define EXYNOS5420_BB_PMOS_EN BIT(7) | 524 | #define EXYNOS5420_BB_PMOS_EN BIT(7) |
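
For reference, the new EXYNOS5420_KFC_CORE_RESET() helper simply shifts the combined core and ETM reset bits by the core number. A quick runnable check of the values it produces (definitions copied from the hunk above):

	#include <stdio.h>

	#define BIT(n)	(1U << (n))

	#define EXYNOS5420_KFC_CORE_RESET0	BIT(8)
	#define EXYNOS5420_KFC_ETM_RESET0	BIT(20)
	#define EXYNOS5420_KFC_CORE_RESET(_nr)	\
		((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))

	int main(void)
	{
		/* core 0 -> bits 8 and 20, core 1 -> bits 9 and 21, and so on */
		printf("KFC core 0 reset mask: 0x%08x\n", EXYNOS5420_KFC_CORE_RESET(0));
		printf("KFC core 1 reset mask: 0x%08x\n", EXYNOS5420_KFC_CORE_RESET(1));
		return 0;
	}
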
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c index fcd79bc3a3e1..c01fca11b224 100644 --- a/arch/arm/mach-footbridge/isa-irq.c +++ b/arch/arm/mach-footbridge/isa-irq.c | |||
@@ -87,13 +87,12 @@ static struct irq_chip isa_hi_chip = { | |||
87 | .irq_unmask = isa_unmask_pic_hi_irq, | 87 | .irq_unmask = isa_unmask_pic_hi_irq, |
88 | }; | 88 | }; |
89 | 89 | ||
90 | static void | 90 | static void isa_irq_handler(struct irq_desc *desc) |
91 | isa_irq_handler(unsigned int irq, struct irq_desc *desc) | ||
92 | { | 91 | { |
93 | unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE; | 92 | unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE; |
94 | 93 | ||
95 | if (isa_irq < _ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) { | 94 | if (isa_irq < _ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) { |
96 | do_bad_IRQ(isa_irq, desc); | 95 | do_bad_IRQ(desc); |
97 | return; | 96 | return; |
98 | } | 97 | } |
99 | 98 | ||
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c index 220333ed741d..2478d9f4d92d 100644 --- a/arch/arm/mach-gemini/gpio.c +++ b/arch/arm/mach-gemini/gpio.c | |||
@@ -126,7 +126,7 @@ static int gpio_set_irq_type(struct irq_data *d, unsigned int type) | |||
126 | return 0; | 126 | return 0; |
127 | } | 127 | } |
128 | 128 | ||
129 | static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 129 | static void gpio_irq_handler(struct irq_desc *desc) |
130 | { | 130 | { |
131 | unsigned int port = (unsigned int)irq_desc_get_handler_data(desc); | 131 | unsigned int port = (unsigned int)irq_desc_get_handler_data(desc); |
132 | unsigned int gpio_irq_no, irq_stat; | 132 | unsigned int gpio_irq_no, irq_stat; |
diff --git a/arch/arm/mach-imx/3ds_debugboard.c b/arch/arm/mach-imx/3ds_debugboard.c index 45903be6e7b3..16496a071ecb 100644 --- a/arch/arm/mach-imx/3ds_debugboard.c +++ b/arch/arm/mach-imx/3ds_debugboard.c | |||
@@ -85,7 +85,7 @@ static struct platform_device smsc_lan9217_device = { | |||
85 | .resource = smsc911x_resources, | 85 | .resource = smsc911x_resources, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc) | 88 | static void mxc_expio_irq_handler(struct irq_desc *desc) |
89 | { | 89 | { |
90 | u32 imr_val; | 90 | u32 imr_val; |
91 | u32 int_valid; | 91 | u32 int_valid; |
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c index 2c0853560bd2..2b147e4bf9c9 100644 --- a/arch/arm/mach-imx/mach-mx31ads.c +++ b/arch/arm/mach-imx/mach-mx31ads.c | |||
@@ -154,7 +154,7 @@ static inline void mxc_init_imx_uart(void) | |||
154 | imx31_add_imx_uart0(&uart_pdata); | 154 | imx31_add_imx_uart0(&uart_pdata); |
155 | } | 155 | } |
156 | 156 | ||
157 | static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc) | 157 | static void mx31ads_expio_irq_handler(struct irq_desc *desc) |
158 | { | 158 | { |
159 | u32 imr_val; | 159 | u32 imr_val; |
160 | u32 int_valid; | 160 | u32 int_valid; |
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c index 9f89e76dfbb9..f6235b28578c 100644 --- a/arch/arm/mach-iop13xx/msi.c +++ b/arch/arm/mach-iop13xx/msi.c | |||
@@ -91,7 +91,7 @@ static void (*write_imipr[])(u32) = { | |||
91 | write_imipr_3, | 91 | write_imipr_3, |
92 | }; | 92 | }; |
93 | 93 | ||
94 | static void iop13xx_msi_handler(unsigned int irq, struct irq_desc *desc) | 94 | static void iop13xx_msi_handler(struct irq_desc *desc) |
95 | { | 95 | { |
96 | int i, j; | 96 | int i, j; |
97 | unsigned long status; | 97 | unsigned long status; |
diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c index cce4cef12b6e..2ae431e8bc1b 100644 --- a/arch/arm/mach-lpc32xx/irq.c +++ b/arch/arm/mach-lpc32xx/irq.c | |||
@@ -370,7 +370,7 @@ static struct irq_chip lpc32xx_irq_chip = { | |||
370 | .irq_set_wake = lpc32xx_irq_wake | 370 | .irq_set_wake = lpc32xx_irq_wake |
371 | }; | 371 | }; |
372 | 372 | ||
373 | static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc) | 373 | static void lpc32xx_sic1_handler(struct irq_desc *desc) |
374 | { | 374 | { |
375 | unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE)); | 375 | unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE)); |
376 | 376 | ||
@@ -383,7 +383,7 @@ static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc) | |||
383 | } | 383 | } |
384 | } | 384 | } |
385 | 385 | ||
386 | static void lpc32xx_sic2_handler(unsigned int irq, struct irq_desc *desc) | 386 | static void lpc32xx_sic2_handler(struct irq_desc *desc) |
387 | { | 387 | { |
388 | unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE)); | 388 | unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE)); |
389 | 389 | ||
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c index 6373e2bff203..842302df99c1 100644 --- a/arch/arm/mach-netx/generic.c +++ b/arch/arm/mach-netx/generic.c | |||
@@ -69,8 +69,7 @@ static struct platform_device *devices[] __initdata = { | |||
69 | #define DEBUG_IRQ(fmt...) while (0) {} | 69 | #define DEBUG_IRQ(fmt...) while (0) {} |
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | static void | 72 | static void netx_hif_demux_handler(struct irq_desc *desc) |
73 | netx_hif_demux_handler(unsigned int irq_unused, struct irq_desc *desc) | ||
74 | { | 73 | { |
75 | unsigned int irq = NETX_IRQ_HIF_CHAINED(0); | 74 | unsigned int irq = NETX_IRQ_HIF_CHAINED(0); |
76 | unsigned int stat; | 75 | unsigned int stat; |
diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c index dfec671b1639..39e20d0ead08 100644 --- a/arch/arm/mach-omap1/fpga.c +++ b/arch/arm/mach-omap1/fpga.c | |||
@@ -87,7 +87,7 @@ static void fpga_mask_ack_irq(struct irq_data *d) | |||
87 | fpga_ack_irq(d); | 87 | fpga_ack_irq(d); |
88 | } | 88 | } |
89 | 89 | ||
90 | static void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc) | 90 | static void innovator_fpga_IRQ_demux(struct irq_desc *desc) |
91 | { | 91 | { |
92 | u32 stat; | 92 | u32 stat; |
93 | int fpga_irq; | 93 | int fpga_irq; |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 07d2e100caab..b3a0dff67e3f 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -44,10 +44,11 @@ config SOC_OMAP5 | |||
44 | select ARM_CPU_SUSPEND if PM | 44 | select ARM_CPU_SUSPEND if PM |
45 | select ARM_GIC | 45 | select ARM_GIC |
46 | select HAVE_ARM_SCU if SMP | 46 | select HAVE_ARM_SCU if SMP |
47 | select HAVE_ARM_TWD if SMP | ||
48 | select HAVE_ARM_ARCH_TIMER | 47 | select HAVE_ARM_ARCH_TIMER |
49 | select ARM_ERRATA_798181 if SMP | 48 | select ARM_ERRATA_798181 if SMP |
49 | select OMAP_INTERCONNECT | ||
50 | select OMAP_INTERCONNECT_BARRIER | 50 | select OMAP_INTERCONNECT_BARRIER |
51 | select PM_OPP if PM | ||
51 | 52 | ||
52 | config SOC_AM33XX | 53 | config SOC_AM33XX |
53 | bool "TI AM33XX" | 54 | bool "TI AM33XX" |
@@ -70,10 +71,13 @@ config SOC_DRA7XX | |||
70 | select ARCH_OMAP2PLUS | 71 | select ARCH_OMAP2PLUS |
71 | select ARM_CPU_SUSPEND if PM | 72 | select ARM_CPU_SUSPEND if PM |
72 | select ARM_GIC | 73 | select ARM_GIC |
74 | select HAVE_ARM_SCU if SMP | ||
73 | select HAVE_ARM_ARCH_TIMER | 75 | select HAVE_ARM_ARCH_TIMER |
74 | select IRQ_CROSSBAR | 76 | select IRQ_CROSSBAR |
75 | select ARM_ERRATA_798181 if SMP | 77 | select ARM_ERRATA_798181 if SMP |
78 | select OMAP_INTERCONNECT | ||
76 | select OMAP_INTERCONNECT_BARRIER | 79 | select OMAP_INTERCONNECT_BARRIER |
80 | select PM_OPP if PM | ||
77 | 81 | ||
78 | config ARCH_OMAP2PLUS | 82 | config ARCH_OMAP2PLUS |
79 | bool | 83 | bool |
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 24c9afc9e8a7..6133eaac685d 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c | |||
@@ -20,13 +20,6 @@ | |||
20 | 20 | ||
21 | #include "common.h" | 21 | #include "common.h" |
22 | 22 | ||
23 | #if !(defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)) | ||
24 | #define intc_of_init NULL | ||
25 | #endif | ||
26 | #ifndef CONFIG_ARCH_OMAP4 | ||
27 | #define gic_of_init NULL | ||
28 | #endif | ||
29 | |||
30 | static const struct of_device_id omap_dt_match_table[] __initconst = { | 23 | static const struct of_device_id omap_dt_match_table[] __initconst = { |
31 | { .compatible = "simple-bus", }, | 24 | { .compatible = "simple-bus", }, |
32 | { .compatible = "ti,omap-infra", }, | 25 | { .compatible = "ti,omap-infra", }, |
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index e3f713ffb06b..54a5ba54d2ff 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c | |||
@@ -653,8 +653,12 @@ void __init dra7xxx_check_revision(void) | |||
653 | omap_revision = DRA752_REV_ES1_0; | 653 | omap_revision = DRA752_REV_ES1_0; |
654 | break; | 654 | break; |
655 | case 1: | 655 | case 1: |
656 | default: | ||
657 | omap_revision = DRA752_REV_ES1_1; | 656 | omap_revision = DRA752_REV_ES1_1; |
657 | break; | ||
658 | case 2: | ||
659 | default: | ||
660 | omap_revision = DRA752_REV_ES2_0; | ||
661 | break; | ||
658 | } | 662 | } |
659 | break; | 663 | break; |
660 | 664 | ||
@@ -674,7 +678,7 @@ void __init dra7xxx_check_revision(void) | |||
674 | /* Unknown default to latest silicon rev as default*/ | 678 | /* Unknown default to latest silicon rev as default*/ |
675 | pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n", | 679 | pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n", |
676 | __func__, idcode, hawkeye, rev); | 680 | __func__, idcode, hawkeye, rev); |
677 | omap_revision = DRA752_REV_ES1_1; | 681 | omap_revision = DRA752_REV_ES2_0; |
678 | } | 682 | } |
679 | 683 | ||
680 | sprintf(soc_name, "DRA%03x", omap_rev() >> 16); | 684 | sprintf(soc_name, "DRA%03x", omap_rev() >> 16); |
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 980c9372e6fd..3eaeaca5da05 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c | |||
@@ -676,6 +676,7 @@ void __init am43xx_init_early(void) | |||
676 | void __init am43xx_init_late(void) | 676 | void __init am43xx_init_late(void) |
677 | { | 677 | { |
678 | omap_common_late_init(); | 678 | omap_common_late_init(); |
679 | omap2_clk_enable_autoidle_all(); | ||
679 | } | 680 | } |
680 | #endif | 681 | #endif |
681 | 682 | ||
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 4cb8fd9f741f..72ebc4c16bae 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c | |||
@@ -901,7 +901,8 @@ static int __init omap_device_late_idle(struct device *dev, void *data) | |||
901 | if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE) | 901 | if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE) |
902 | return 0; | 902 | return 0; |
903 | 903 | ||
904 | if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) { | 904 | if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER && |
905 | od->_driver_status != BUS_NOTIFY_BIND_DRIVER) { | ||
905 | if (od->_state == OMAP_DEVICE_STATE_ENABLED) { | 906 | if (od->_state == OMAP_DEVICE_STATE_ENABLED) { |
906 | dev_warn(dev, "%s: enabled but no driver. Idling\n", | 907 | dev_warn(dev, "%s: enabled but no driver. Idling\n", |
907 | __func__); | 908 | __func__); |
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index 425bfcd67db6..b668719b9b25 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h | |||
@@ -103,7 +103,8 @@ static inline void enable_omap3630_toggle_l2_on_restore(void) { } | |||
103 | #define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD (1 << 0) | 103 | #define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD (1 << 0) |
104 | #define PM_OMAP4_CPU_OSWR_DISABLE (1 << 1) | 104 | #define PM_OMAP4_CPU_OSWR_DISABLE (1 << 1) |
105 | 105 | ||
106 | #if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4) | 106 | #if defined(CONFIG_PM) && (defined(CONFIG_ARCH_OMAP4) ||\ |
107 | defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)) | ||
107 | extern u16 pm44xx_errata; | 108 | extern u16 pm44xx_errata; |
108 | #define IS_PM44XX_ERRATUM(id) (pm44xx_errata & (id)) | 109 | #define IS_PM44XX_ERRATUM(id) (pm44xx_errata & (id)) |
109 | #else | 110 | #else |
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c index 257e98c26618..3fc2cbe52113 100644 --- a/arch/arm/mach-omap2/prm_common.c +++ b/arch/arm/mach-omap2/prm_common.c | |||
@@ -102,7 +102,7 @@ static void omap_prcm_events_filter_priority(unsigned long *events, | |||
102 | * dispatched accordingly. Clearing of the wakeup events should be | 102 | * dispatched accordingly. Clearing of the wakeup events should be |
103 | * done by the SoC specific individual handlers. | 103 | * done by the SoC specific individual handlers. |
104 | */ | 104 | */ |
105 | static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc) | 105 | static void omap_prcm_irq_handler(struct irq_desc *desc) |
106 | { | 106 | { |
107 | unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG]; | 107 | unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG]; |
108 | unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG]; | 108 | unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG]; |
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h index f97654d11ea5..2d1d3845253c 100644 --- a/arch/arm/mach-omap2/soc.h +++ b/arch/arm/mach-omap2/soc.h | |||
@@ -469,6 +469,8 @@ IS_OMAP_TYPE(3430, 0x3430) | |||
469 | #define DRA7XX_CLASS 0x07000000 | 469 | #define DRA7XX_CLASS 0x07000000 |
470 | #define DRA752_REV_ES1_0 (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8)) | 470 | #define DRA752_REV_ES1_0 (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8)) |
471 | #define DRA752_REV_ES1_1 (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8)) | 471 | #define DRA752_REV_ES1_1 (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8)) |
472 | #define DRA752_REV_ES2_0 (DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8)) | ||
473 | #define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8)) | ||
472 | #define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8)) | 474 | #define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8)) |
473 | 475 | ||
474 | void omap2xxx_check_revision(void); | 476 | void omap2xxx_check_revision(void); |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index e4d8701f99f9..a55655127ef2 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
@@ -297,12 +297,8 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, | |||
297 | if (IS_ERR(src)) | 297 | if (IS_ERR(src)) |
298 | return PTR_ERR(src); | 298 | return PTR_ERR(src); |
299 | 299 | ||
300 | r = clk_set_parent(timer->fclk, src); | 300 | WARN(clk_set_parent(timer->fclk, src) < 0, |
301 | if (r < 0) { | 301 | "Cannot set timer parent clock, no PLL clock driver?"); |
302 | pr_warn("%s: %s cannot set source\n", __func__, oh->name); | ||
303 | clk_put(src); | ||
304 | return r; | ||
305 | } | ||
306 | 302 | ||
307 | clk_put(src); | 303 | clk_put(src); |
308 | 304 | ||
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c index e5a35f6b83a7..d44d311704ba 100644 --- a/arch/arm/mach-omap2/vc.c +++ b/arch/arm/mach-omap2/vc.c | |||
@@ -300,7 +300,7 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm) | |||
300 | 300 | ||
301 | val = voltdm->read(OMAP3_PRM_POLCTRL_OFFSET); | 301 | val = voltdm->read(OMAP3_PRM_POLCTRL_OFFSET); |
302 | if (!(val & OMAP3430_PRM_POLCTRL_CLKREQ_POL) || | 302 | if (!(val & OMAP3430_PRM_POLCTRL_CLKREQ_POL) || |
303 | (val & OMAP3430_PRM_POLCTRL_CLKREQ_POL)) { | 303 | (val & OMAP3430_PRM_POLCTRL_OFFMODE_POL)) { |
304 | val |= OMAP3430_PRM_POLCTRL_CLKREQ_POL; | 304 | val |= OMAP3430_PRM_POLCTRL_CLKREQ_POL; |
305 | val &= ~OMAP3430_PRM_POLCTRL_OFFMODE_POL; | 305 | val &= ~OMAP3430_PRM_POLCTRL_OFFMODE_POL; |
306 | pr_debug("PM: fixing sys_clkreq and sys_off_mode polarity to 0x%x\n", | 306 | pr_debug("PM: fixing sys_clkreq and sys_off_mode polarity to 0x%x\n", |
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index 70366b35d299..a727282bfa99 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c | |||
@@ -496,13 +496,13 @@ static struct irq_chip balloon3_irq_chip = { | |||
496 | .irq_unmask = balloon3_unmask_irq, | 496 | .irq_unmask = balloon3_unmask_irq, |
497 | }; | 497 | }; |
498 | 498 | ||
499 | static void balloon3_irq_handler(unsigned int __irq, struct irq_desc *desc) | 499 | static void balloon3_irq_handler(struct irq_desc *desc) |
500 | { | 500 | { |
501 | unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) & | 501 | unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) & |
502 | balloon3_irq_enabled; | 502 | balloon3_irq_enabled; |
503 | do { | 503 | do { |
504 | struct irq_data *d = irq_desc_get_irq_data(desc); | 504 | struct irq_data *d = irq_desc_get_irq_data(desc); |
505 | struct irq_chip *chip = irq_data_get_chip(d); | 505 | struct irq_chip *chip = irq_desc_get_chip(desc); |
506 | unsigned int irq; | 506 | unsigned int irq; |
507 | 507 | ||
508 | /* clear useless edge notification */ | 508 | /* clear useless edge notification */ |
diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c index 1fa79f1f832d..3221ae15bef7 100644 --- a/arch/arm/mach-pxa/cm-x2xx-pci.c +++ b/arch/arm/mach-pxa/cm-x2xx-pci.c | |||
@@ -29,13 +29,12 @@ | |||
29 | void __iomem *it8152_base_address; | 29 | void __iomem *it8152_base_address; |
30 | static int cmx2xx_it8152_irq_gpio; | 30 | static int cmx2xx_it8152_irq_gpio; |
31 | 31 | ||
32 | static void cmx2xx_it8152_irq_demux(unsigned int __irq, struct irq_desc *desc) | 32 | static void cmx2xx_it8152_irq_demux(struct irq_desc *desc) |
33 | { | 33 | { |
34 | unsigned int irq = irq_desc_get_irq(desc); | ||
35 | /* clear our parent irq */ | 34 | /* clear our parent irq */ |
36 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 35 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
37 | 36 | ||
38 | it8152_irq_demux(irq, desc); | 37 | it8152_irq_demux(desc); |
39 | } | 38 | } |
40 | 39 | ||
41 | void __cmx2xx_pci_init_irq(int irq_gpio) | 40 | void __cmx2xx_pci_init_irq(int irq_gpio) |
diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h index d28fe291233a..07b93fd24474 100644 --- a/arch/arm/mach-pxa/include/mach/addr-map.h +++ b/arch/arm/mach-pxa/include/mach/addr-map.h | |||
@@ -44,6 +44,13 @@ | |||
44 | */ | 44 | */ |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * DFI Bus for NAND, PXA3xx only | ||
48 | */ | ||
49 | #define NAND_PHYS 0x43100000 | ||
50 | #define NAND_VIRT IOMEM(0xf6300000) | ||
51 | #define NAND_SIZE 0x00100000 | ||
52 | |||
53 | /* | ||
47 | * Internal Memory Controller (PXA27x and later) | 54 | * Internal Memory Controller (PXA27x and later) |
48 | */ | 55 | */ |
49 | #define IMEMC_PHYS 0x58000000 | 56 | #define IMEMC_PHYS 0x58000000 |
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index b070167deef2..4823d972e647 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c | |||
@@ -120,7 +120,7 @@ static struct irq_chip lpd270_irq_chip = { | |||
120 | .irq_unmask = lpd270_unmask_irq, | 120 | .irq_unmask = lpd270_unmask_irq, |
121 | }; | 121 | }; |
122 | 122 | ||
123 | static void lpd270_irq_handler(unsigned int __irq, struct irq_desc *desc) | 123 | static void lpd270_irq_handler(struct irq_desc *desc) |
124 | { | 124 | { |
125 | unsigned int irq; | 125 | unsigned int irq; |
126 | unsigned long pending; | 126 | unsigned long pending; |
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index 9a0c8affdadb..d8319b54299a 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c | |||
@@ -284,7 +284,7 @@ static struct irq_chip pcm990_irq_chip = { | |||
284 | .irq_unmask = pcm990_unmask_irq, | 284 | .irq_unmask = pcm990_unmask_irq, |
285 | }; | 285 | }; |
286 | 286 | ||
287 | static void pcm990_irq_handler(unsigned int __irq, struct irq_desc *desc) | 287 | static void pcm990_irq_handler(struct irq_desc *desc) |
288 | { | 288 | { |
289 | unsigned int irq; | 289 | unsigned int irq; |
290 | unsigned long pending; | 290 | unsigned long pending; |
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index ce0f8d6242e2..06005d3f2ba3 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c | |||
@@ -47,6 +47,13 @@ extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)); | |||
47 | #define ISRAM_START 0x5c000000 | 47 | #define ISRAM_START 0x5c000000 |
48 | #define ISRAM_SIZE SZ_256K | 48 | #define ISRAM_SIZE SZ_256K |
49 | 49 | ||
50 | /* | ||
51 | * NAND NFC: DFI bus arbitration subset | ||
52 | */ | ||
53 | #define NDCR (*(volatile u32 __iomem*)(NAND_VIRT + 0)) | ||
54 | #define NDCR_ND_ARB_EN (1 << 12) | ||
55 | #define NDCR_ND_ARB_CNTL (1 << 19) | ||
56 | |||
50 | static void __iomem *sram; | 57 | static void __iomem *sram; |
51 | static unsigned long wakeup_src; | 58 | static unsigned long wakeup_src; |
52 | 59 | ||
@@ -362,7 +369,12 @@ static struct map_desc pxa3xx_io_desc[] __initdata = { | |||
362 | .pfn = __phys_to_pfn(PXA3XX_SMEMC_BASE), | 369 | .pfn = __phys_to_pfn(PXA3XX_SMEMC_BASE), |
363 | .length = SMEMC_SIZE, | 370 | .length = SMEMC_SIZE, |
364 | .type = MT_DEVICE | 371 | .type = MT_DEVICE |
365 | } | 372 | }, { |
373 | .virtual = (unsigned long)NAND_VIRT, | ||
374 | .pfn = __phys_to_pfn(NAND_PHYS), | ||
375 | .length = NAND_SIZE, | ||
376 | .type = MT_DEVICE | ||
377 | }, | ||
366 | }; | 378 | }; |
367 | 379 | ||
368 | void __init pxa3xx_map_io(void) | 380 | void __init pxa3xx_map_io(void) |
@@ -419,6 +431,13 @@ static int __init pxa3xx_init(void) | |||
419 | */ | 431 | */ |
420 | ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); | 432 | ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); |
421 | 433 | ||
434 | /* | ||
435 | * Disable DFI bus arbitration, to prevent a system bus lock if | ||
436 | * somebody disables the NAND clock (unused clock) while this | ||
437 | * bit remains set. | ||
438 | */ | ||
439 | NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL; | ||
440 | |||
422 | if ((ret = pxa_init_dma(IRQ_DMA, 32))) | 441 | if ((ret = pxa_init_dma(IRQ_DMA, 32))) |
423 | return ret; | 442 | return ret; |
424 | 443 | ||
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 4841d6cefe76..8ab26370107e 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c | |||
@@ -276,7 +276,7 @@ static inline unsigned long viper_irq_pending(void) | |||
276 | viper_irq_enabled_mask; | 276 | viper_irq_enabled_mask; |
277 | } | 277 | } |
278 | 278 | ||
279 | static void viper_irq_handler(unsigned int __irq, struct irq_desc *desc) | 279 | static void viper_irq_handler(struct irq_desc *desc) |
280 | { | 280 | { |
281 | unsigned int irq; | 281 | unsigned int irq; |
282 | unsigned long pending; | 282 | unsigned long pending; |
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 6f94dd7b4dee..30e62a3f0701 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c | |||
@@ -105,7 +105,7 @@ static inline unsigned long zeus_irq_pending(void) | |||
105 | return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask; | 105 | return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask; |
106 | } | 106 | } |
107 | 107 | ||
108 | static void zeus_irq_handler(unsigned int __irq, struct irq_desc *desc) | 108 | static void zeus_irq_handler(struct irq_desc *desc) |
109 | { | 109 | { |
110 | unsigned int irq; | 110 | unsigned int irq; |
111 | unsigned long pending; | 111 | unsigned long pending; |
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c index f726d4c4e6dd..dc67a7fb3831 100644 --- a/arch/arm/mach-rpc/ecard.c +++ b/arch/arm/mach-rpc/ecard.c | |||
@@ -551,8 +551,7 @@ static void ecard_check_lockup(struct irq_desc *desc) | |||
551 | } | 551 | } |
552 | } | 552 | } |
553 | 553 | ||
554 | static void | 554 | static void ecard_irq_handler(struct irq_desc *desc) |
555 | ecard_irq_handler(unsigned int irq, struct irq_desc *desc) | ||
556 | { | 555 | { |
557 | ecard_t *ec; | 556 | ecard_t *ec; |
558 | int called = 0; | 557 | int called = 0; |
diff --git a/arch/arm/mach-s3c24xx/bast-irq.c b/arch/arm/mach-s3c24xx/bast-irq.c index ced1ab86ac83..2bb08961e934 100644 --- a/arch/arm/mach-s3c24xx/bast-irq.c +++ b/arch/arm/mach-s3c24xx/bast-irq.c | |||
@@ -100,9 +100,7 @@ static struct irq_chip bast_pc104_chip = { | |||
100 | .irq_ack = bast_pc104_maskack | 100 | .irq_ack = bast_pc104_maskack |
101 | }; | 101 | }; |
102 | 102 | ||
103 | static void | 103 | static void bast_irq_pc104_demux(struct irq_desc *desc) |
104 | bast_irq_pc104_demux(unsigned int irq, | ||
105 | struct irq_desc *desc) | ||
106 | { | 104 | { |
107 | unsigned int stat; | 105 | unsigned int stat; |
108 | unsigned int irqno; | 106 | unsigned int irqno; |
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index fd63ecfb2f81..ddb30b8434c5 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c | |||
@@ -388,22 +388,22 @@ static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end) | |||
388 | } | 388 | } |
389 | } | 389 | } |
390 | 390 | ||
391 | static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc) | 391 | static void s3c_irq_demux_eint0_3(struct irq_desc *desc) |
392 | { | 392 | { |
393 | s3c_irq_demux_eint(0, 3); | 393 | s3c_irq_demux_eint(0, 3); |
394 | } | 394 | } |
395 | 395 | ||
396 | static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc) | 396 | static void s3c_irq_demux_eint4_11(struct irq_desc *desc) |
397 | { | 397 | { |
398 | s3c_irq_demux_eint(4, 11); | 398 | s3c_irq_demux_eint(4, 11); |
399 | } | 399 | } |
400 | 400 | ||
401 | static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc) | 401 | static void s3c_irq_demux_eint12_19(struct irq_desc *desc) |
402 | { | 402 | { |
403 | s3c_irq_demux_eint(12, 19); | 403 | s3c_irq_demux_eint(12, 19); |
404 | } | 404 | } |
405 | 405 | ||
406 | static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc) | 406 | static void s3c_irq_demux_eint20_27(struct irq_desc *desc) |
407 | { | 407 | { |
408 | s3c_irq_demux_eint(20, 27); | 408 | s3c_irq_demux_eint(20, 27); |
409 | } | 409 | } |
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 6d237b4f7a8e..8411985af9ff 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c | |||
@@ -166,7 +166,7 @@ static struct sa1100_port_fns neponset_port_fns = { | |||
166 | * ensure that the IRQ signal is deasserted before returning. This | 166 | * ensure that the IRQ signal is deasserted before returning. This |
167 | * is rather unfortunate. | 167 | * is rather unfortunate. |
168 | */ | 168 | */ |
169 | static void neponset_irq_handler(unsigned int irq, struct irq_desc *desc) | 169 | static void neponset_irq_handler(struct irq_desc *desc) |
170 | { | 170 | { |
171 | struct neponset_drvdata *d = irq_desc_get_handler_data(desc); | 171 | struct neponset_drvdata *d = irq_desc_get_handler_data(desc); |
172 | unsigned int irr; | 172 | unsigned int irr; |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 9769f1eefe3b..00b7f7de28a1 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -365,15 +365,21 @@ do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *r | |||
365 | user: | 365 | user: |
366 | if (LDST_L_BIT(instr)) { | 366 | if (LDST_L_BIT(instr)) { |
367 | unsigned long val; | 367 | unsigned long val; |
368 | unsigned int __ua_flags = uaccess_save_and_enable(); | ||
369 | |||
368 | get16t_unaligned_check(val, addr); | 370 | get16t_unaligned_check(val, addr); |
371 | uaccess_restore(__ua_flags); | ||
369 | 372 | ||
370 | /* signed half-word? */ | 373 | /* signed half-word? */ |
371 | if (instr & 0x40) | 374 | if (instr & 0x40) |
372 | val = (signed long)((signed short) val); | 375 | val = (signed long)((signed short) val); |
373 | 376 | ||
374 | regs->uregs[rd] = val; | 377 | regs->uregs[rd] = val; |
375 | } else | 378 | } else { |
379 | unsigned int __ua_flags = uaccess_save_and_enable(); | ||
376 | put16t_unaligned_check(regs->uregs[rd], addr); | 380 | put16t_unaligned_check(regs->uregs[rd], addr); |
381 | uaccess_restore(__ua_flags); | ||
382 | } | ||
377 | 383 | ||
378 | return TYPE_LDST; | 384 | return TYPE_LDST; |
379 | 385 | ||
@@ -420,14 +426,21 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr, | |||
420 | 426 | ||
421 | user: | 427 | user: |
422 | if (load) { | 428 | if (load) { |
423 | unsigned long val; | 429 | unsigned long val, val2; |
430 | unsigned int __ua_flags = uaccess_save_and_enable(); | ||
431 | |||
424 | get32t_unaligned_check(val, addr); | 432 | get32t_unaligned_check(val, addr); |
433 | get32t_unaligned_check(val2, addr + 4); | ||
434 | |||
435 | uaccess_restore(__ua_flags); | ||
436 | |||
425 | regs->uregs[rd] = val; | 437 | regs->uregs[rd] = val; |
426 | get32t_unaligned_check(val, addr + 4); | 438 | regs->uregs[rd2] = val2; |
427 | regs->uregs[rd2] = val; | ||
428 | } else { | 439 | } else { |
440 | unsigned int __ua_flags = uaccess_save_and_enable(); | ||
429 | put32t_unaligned_check(regs->uregs[rd], addr); | 441 | put32t_unaligned_check(regs->uregs[rd], addr); |
430 | put32t_unaligned_check(regs->uregs[rd2], addr + 4); | 442 | put32t_unaligned_check(regs->uregs[rd2], addr + 4); |
443 | uaccess_restore(__ua_flags); | ||
431 | } | 444 | } |
432 | 445 | ||
433 | return TYPE_LDST; | 446 | return TYPE_LDST; |
@@ -458,10 +471,15 @@ do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *reg | |||
458 | trans: | 471 | trans: |
459 | if (LDST_L_BIT(instr)) { | 472 | if (LDST_L_BIT(instr)) { |
460 | unsigned int val; | 473 | unsigned int val; |
474 | unsigned int __ua_flags = uaccess_save_and_enable(); | ||
461 | get32t_unaligned_check(val, addr); | 475 | get32t_unaligned_check(val, addr); |
476 | uaccess_restore(__ua_flags); | ||
462 | regs->uregs[rd] = val; | 477 | regs->uregs[rd] = val; |
463 | } else | 478 | } else { |
479 | unsigned int __ua_flags = uaccess_save_and_enable(); | ||
464 | put32t_unaligned_check(regs->uregs[rd], addr); | 480 | put32t_unaligned_check(regs->uregs[rd], addr); |
481 | uaccess_restore(__ua_flags); | ||
482 | } | ||
465 | return TYPE_LDST; | 483 | return TYPE_LDST; |
466 | 484 | ||
467 | fault: | 485 | fault: |
@@ -531,6 +549,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg | |||
531 | #endif | 549 | #endif |
532 | 550 | ||
533 | if (user_mode(regs)) { | 551 | if (user_mode(regs)) { |
552 | unsigned int __ua_flags = uaccess_save_and_enable(); | ||
534 | for (regbits = REGMASK_BITS(instr), rd = 0; regbits; | 553 | for (regbits = REGMASK_BITS(instr), rd = 0; regbits; |
535 | regbits >>= 1, rd += 1) | 554 | regbits >>= 1, rd += 1) |
536 | if (regbits & 1) { | 555 | if (regbits & 1) { |
@@ -542,6 +561,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg | |||
542 | put32t_unaligned_check(regs->uregs[rd], eaddr); | 561 | put32t_unaligned_check(regs->uregs[rd], eaddr); |
543 | eaddr += 4; | 562 | eaddr += 4; |
544 | } | 563 | } |
564 | uaccess_restore(__ua_flags); | ||
545 | } else { | 565 | } else { |
546 | for (regbits = REGMASK_BITS(instr), rd = 0; regbits; | 566 | for (regbits = REGMASK_BITS(instr), rd = 0; regbits; |
547 | regbits >>= 1, rd += 1) | 567 | regbits >>= 1, rd += 1) |
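
All of the alignment.c hunks apply the same rule: with software PAN the kernel can no longer reach userspace by default, so each user-mode fixup path has to open a uaccess window around its get*t/put*t accessors. The shape of the change, as a hedged sketch rather than a literal excerpt (kernel context assumed):

	/* Sketch of the bracketing added throughout do_alignment_*(); assumes
	 * the 4.3-era uaccess_save_and_enable()/uaccess_restore() helpers. */
	if (user_mode(regs)) {
		unsigned int __ua_flags = uaccess_save_and_enable();

		/* every access to the faulting user address happens in here */
		put32t_unaligned_check(regs->uregs[rd], addr);

		uaccess_restore(__ua_flags);
	}
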
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index e62604384945..1a7815e5421b 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -1249,7 +1249,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) | |||
1249 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); | 1249 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); |
1250 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 1250 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
1251 | dma_addr_t dma_addr, iova; | 1251 | dma_addr_t dma_addr, iova; |
1252 | int i, ret = DMA_ERROR_CODE; | 1252 | int i; |
1253 | 1253 | ||
1254 | dma_addr = __alloc_iova(mapping, size); | 1254 | dma_addr = __alloc_iova(mapping, size); |
1255 | if (dma_addr == DMA_ERROR_CODE) | 1255 | if (dma_addr == DMA_ERROR_CODE) |
@@ -1257,6 +1257,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) | |||
1257 | 1257 | ||
1258 | iova = dma_addr; | 1258 | iova = dma_addr; |
1259 | for (i = 0; i < count; ) { | 1259 | for (i = 0; i < count; ) { |
1260 | int ret; | ||
1261 | |||
1260 | unsigned int next_pfn = page_to_pfn(pages[i]) + 1; | 1262 | unsigned int next_pfn = page_to_pfn(pages[i]) + 1; |
1261 | phys_addr_t phys = page_to_phys(pages[i]); | 1263 | phys_addr_t phys = page_to_phys(pages[i]); |
1262 | unsigned int len, j; | 1264 | unsigned int len, j; |
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S index 71df43547659..39c20afad7ed 100644 --- a/arch/arm/nwfpe/entry.S +++ b/arch/arm/nwfpe/entry.S | |||
@@ -95,9 +95,10 @@ emulate: | |||
95 | reteq r4 @ no, return failure | 95 | reteq r4 @ no, return failure |
96 | 96 | ||
97 | next: | 97 | next: |
98 | uaccess_enable r3 | ||
98 | .Lx1: ldrt r6, [r5], #4 @ get the next instruction and | 99 | .Lx1: ldrt r6, [r5], #4 @ get the next instruction and |
99 | @ increment PC | 100 | @ increment PC |
100 | 101 | uaccess_disable r3 | |
101 | and r2, r6, #0x0F000000 @ test for FP insns | 102 | and r2, r6, #0x0F000000 @ test for FP insns |
102 | teq r2, #0x0C000000 | 103 | teq r2, #0x0C000000 |
103 | teqne r2, #0x0D000000 | 104 | teqne r2, #0x0D000000 |
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 79c33eca09a3..7bd22d8e5b11 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c | |||
@@ -407,7 +407,7 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type) | |||
407 | return 0; | 407 | return 0; |
408 | } | 408 | } |
409 | 409 | ||
410 | static void gpio_irq_handler(unsigned __irq, struct irq_desc *desc) | 410 | static void gpio_irq_handler(struct irq_desc *desc) |
411 | { | 411 | { |
412 | struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc); | 412 | struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc); |
413 | u32 cause, type; | 413 | u32 cause, type; |
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c index ad9529cc4203..daa1a65f2eb7 100644 --- a/arch/arm/plat-pxa/ssp.c +++ b/arch/arm/plat-pxa/ssp.c | |||
@@ -107,7 +107,6 @@ static const struct of_device_id pxa_ssp_of_ids[] = { | |||
107 | { .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP }, | 107 | { .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP }, |
108 | { .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP }, | 108 | { .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP }, |
109 | { .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP }, | 109 | { .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP }, |
110 | { .compatible = "mrvl,lpss-ssp", .data = (void *) LPSS_SSP }, | ||
111 | { }, | 110 | { }, |
112 | }; | 111 | }; |
113 | MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids); | 112 | MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids); |
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S index f00e08075938..10fd99c568c6 100644 --- a/arch/arm/xen/hypercall.S +++ b/arch/arm/xen/hypercall.S | |||
@@ -98,8 +98,23 @@ ENTRY(privcmd_call) | |||
98 | mov r1, r2 | 98 | mov r1, r2 |
99 | mov r2, r3 | 99 | mov r2, r3 |
100 | ldr r3, [sp, #8] | 100 | ldr r3, [sp, #8] |
101 | /* | ||
102 | * Privcmd calls are issued by the userspace. We need to allow the | ||
103 | * kernel to access the userspace memory before issuing the hypercall. | ||
104 | */ | ||
105 | uaccess_enable r4 | ||
106 | |||
107 | /* r4 is loaded now as we use it as scratch register before */ | ||
101 | ldr r4, [sp, #4] | 108 | ldr r4, [sp, #4] |
102 | __HVC(XEN_IMM) | 109 | __HVC(XEN_IMM) |
110 | |||
111 | /* | ||
112 | * Disable userspace access from kernel. This is fine to do it | ||
113 | * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is | ||
114 | * called before. | ||
115 | */ | ||
116 | uaccess_disable r4 | ||
117 | |||
103 | ldm sp!, {r4} | 118 | ldm sp!, {r4} |
104 | ret lr | 119 | ret lr |
105 | ENDPROC(privcmd_call); | 120 | ENDPROC(privcmd_call); |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7d95663c0160..07d1811aa03f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -32,6 +32,7 @@ config ARM64 | |||
32 | select GENERIC_CLOCKEVENTS_BROADCAST | 32 | select GENERIC_CLOCKEVENTS_BROADCAST |
33 | select GENERIC_CPU_AUTOPROBE | 33 | select GENERIC_CPU_AUTOPROBE |
34 | select GENERIC_EARLY_IOREMAP | 34 | select GENERIC_EARLY_IOREMAP |
35 | select GENERIC_IDLE_POLL_SETUP | ||
35 | select GENERIC_IRQ_PROBE | 36 | select GENERIC_IRQ_PROBE |
36 | select GENERIC_IRQ_SHOW | 37 | select GENERIC_IRQ_SHOW |
37 | select GENERIC_IRQ_SHOW_LEVEL | 38 | select GENERIC_IRQ_SHOW_LEVEL |
@@ -331,6 +332,22 @@ config ARM64_ERRATUM_845719 | |||
331 | 332 | ||
332 | If unsure, say Y. | 333 | If unsure, say Y. |
333 | 334 | ||
335 | config ARM64_ERRATUM_843419 | ||
336 | bool "Cortex-A53: 843419: A load or store might access an incorrect address" | ||
337 | depends on MODULES | ||
338 | default y | ||
339 | help | ||
340 | This option builds kernel modules using the large memory model in | ||
341 | order to avoid the use of the ADRP instruction, which can cause | ||
342 | a subsequent memory access to use an incorrect address on Cortex-A53 | ||
343 | parts up to r0p4. | ||
344 | |||
345 | Note that the kernel itself must be linked with a version of ld | ||
346 | which fixes potentially affected ADRP instructions through the | ||
347 | use of veneers. | ||
348 | |||
349 | If unsure, say Y. | ||
350 | |||
334 | endmenu | 351 | endmenu |
335 | 352 | ||
336 | 353 | ||
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 15ff5b4156fd..d10b5d483022 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile | |||
@@ -41,6 +41,10 @@ endif | |||
41 | 41 | ||
42 | CHECKFLAGS += -D__aarch64__ | 42 | CHECKFLAGS += -D__aarch64__ |
43 | 43 | ||
44 | ifeq ($(CONFIG_ARM64_ERRATUM_843419), y) | ||
45 | KBUILD_CFLAGS_MODULE += -mcmodel=large | ||
46 | endif | ||
47 | |||
44 | # Default value | 48 | # Default value |
45 | head-y := arch/arm64/kernel/head.o | 49 | head-y := arch/arm64/kernel/head.o |
46 | 50 | ||
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index d18ee4259ee5..06a15644be38 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi | |||
@@ -81,7 +81,7 @@ | |||
81 | }; | 81 | }; |
82 | 82 | ||
83 | idle-states { | 83 | idle-states { |
84 | entry-method = "arm,psci"; | 84 | entry-method = "psci"; |
85 | 85 | ||
86 | CPU_SLEEP_0: cpu-sleep-0 { | 86 | CPU_SLEEP_0: cpu-sleep-0 { |
87 | compatible = "arm,idle-state"; | 87 | compatible = "arm,idle-state"; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index a712bea3bf2c..cc093a482aa4 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi | |||
@@ -106,7 +106,7 @@ | |||
106 | }; | 106 | }; |
107 | 107 | ||
108 | idle-states { | 108 | idle-states { |
109 | entry-method = "arm,psci"; | 109 | entry-method = "psci"; |
110 | 110 | ||
111 | cpu_sleep: cpu-sleep-0 { | 111 | cpu_sleep: cpu-sleep-0 { |
112 | compatible = "arm,idle-state"; | 112 | compatible = "arm,idle-state"; |
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 2bb7009bdac7..a57601f9d17c 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h | |||
@@ -43,9 +43,4 @@ static inline void ack_bad_irq(unsigned int irq) | |||
43 | irq_err_count++; | 43 | irq_err_count++; |
44 | } | 44 | } |
45 | 45 | ||
46 | /* | ||
47 | * No arch-specific IRQ flags. | ||
48 | */ | ||
49 | #define set_irq_flags(irq, flags) | ||
50 | |||
51 | #endif /* __ASM_HARDIRQ_H */ | 46 | #endif /* __ASM_HARDIRQ_H */ |
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 7605e095217f..9694f2654593 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h | |||
@@ -95,6 +95,7 @@ | |||
95 | SCTLR_EL2_SA | SCTLR_EL2_I) | 95 | SCTLR_EL2_SA | SCTLR_EL2_I) |
96 | 96 | ||
97 | /* TCR_EL2 Registers bits */ | 97 | /* TCR_EL2 Registers bits */ |
98 | #define TCR_EL2_RES1 ((1 << 31) | (1 << 23)) | ||
98 | #define TCR_EL2_TBI (1 << 20) | 99 | #define TCR_EL2_TBI (1 << 20) |
99 | #define TCR_EL2_PS (7 << 16) | 100 | #define TCR_EL2_PS (7 << 16) |
100 | #define TCR_EL2_PS_40B (2 << 16) | 101 | #define TCR_EL2_PS_40B (2 << 16) |
@@ -106,9 +107,10 @@ | |||
106 | #define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \ | 107 | #define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \ |
107 | TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ) | 108 | TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ) |
108 | 109 | ||
109 | #define TCR_EL2_FLAGS (TCR_EL2_PS_40B) | 110 | #define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B) |
110 | 111 | ||
111 | /* VTCR_EL2 Registers bits */ | 112 | /* VTCR_EL2 Registers bits */ |
113 | #define VTCR_EL2_RES1 (1 << 31) | ||
112 | #define VTCR_EL2_PS_MASK (7 << 16) | 114 | #define VTCR_EL2_PS_MASK (7 << 16) |
113 | #define VTCR_EL2_TG0_MASK (1 << 14) | 115 | #define VTCR_EL2_TG0_MASK (1 << 14) |
114 | #define VTCR_EL2_TG0_4K (0 << 14) | 116 | #define VTCR_EL2_TG0_4K (0 << 14) |
@@ -147,7 +149,8 @@ | |||
147 | */ | 149 | */ |
148 | #define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \ | 150 | #define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \ |
149 | VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \ | 151 | VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \ |
150 | VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B) | 152 | VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \ |
153 | VTCR_EL2_RES1) | ||
151 | #define VTTBR_X (38 - VTCR_EL2_T0SZ_40B) | 154 | #define VTTBR_X (38 - VTCR_EL2_T0SZ_40B) |
152 | #else | 155 | #else |
153 | /* | 156 | /* |
@@ -158,7 +161,8 @@ | |||
158 | */ | 161 | */ |
159 | #define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \ | 162 | #define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \ |
160 | VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \ | 163 | VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \ |
161 | VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B) | 164 | VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \ |
165 | VTCR_EL2_RES1) | ||
162 | #define VTTBR_X (37 - VTCR_EL2_T0SZ_40B) | 166 | #define VTTBR_X (37 - VTCR_EL2_T0SZ_40B) |
163 | #endif | 167 | #endif |
164 | 168 | ||
@@ -168,7 +172,6 @@ | |||
168 | #define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT) | 172 | #define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT) |
169 | 173 | ||
170 | /* Hyp System Trap Register */ | 174 | /* Hyp System Trap Register */ |
171 | #define HSTR_EL2_TTEE (1 << 16) | ||
172 | #define HSTR_EL2_T(x) (1 << x) | 175 | #define HSTR_EL2_T(x) (1 << x) |
173 | 176 | ||
174 | /* Hyp Coproccessor Trap Register Shifts */ | 177 | /* Hyp Coproccessor Trap Register Shifts */ |
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 67fa0de3d483..5e377101f919 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h | |||
@@ -53,9 +53,7 @@ | |||
53 | #define IFSR32_EL2 25 /* Instruction Fault Status Register */ | 53 | #define IFSR32_EL2 25 /* Instruction Fault Status Register */ |
54 | #define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */ | 54 | #define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */ |
55 | #define DBGVCR32_EL2 27 /* Debug Vector Catch Register */ | 55 | #define DBGVCR32_EL2 27 /* Debug Vector Catch Register */ |
56 | #define TEECR32_EL1 28 /* ThumbEE Configuration Register */ | 56 | #define NR_SYS_REGS 28 |
57 | #define TEEHBR32_EL1 29 /* ThumbEE Handler Base Register */ | ||
58 | #define NR_SYS_REGS 30 | ||
59 | 57 | ||
60 | /* 32bit mapping */ | 58 | /* 32bit mapping */ |
61 | #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ | 59 | #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 415938dc45cf..ed039688c221 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -30,19 +30,16 @@ | |||
30 | 30 | ||
31 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED | 31 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED |
32 | 32 | ||
33 | #if defined(CONFIG_KVM_ARM_MAX_VCPUS) | ||
34 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS | ||
35 | #else | ||
36 | #define KVM_MAX_VCPUS 0 | ||
37 | #endif | ||
38 | |||
39 | #define KVM_USER_MEM_SLOTS 32 | 33 | #define KVM_USER_MEM_SLOTS 32 |
40 | #define KVM_PRIVATE_MEM_SLOTS 4 | 34 | #define KVM_PRIVATE_MEM_SLOTS 4 |
41 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 35 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
36 | #define KVM_HALT_POLL_NS_DEFAULT 500000 | ||
42 | 37 | ||
43 | #include <kvm/arm_vgic.h> | 38 | #include <kvm/arm_vgic.h> |
44 | #include <kvm/arm_arch_timer.h> | 39 | #include <kvm/arm_arch_timer.h> |
45 | 40 | ||
41 | #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS | ||
42 | |||
46 | #define KVM_VCPU_MAX_FEATURES 3 | 43 | #define KVM_VCPU_MAX_FEATURES 3 |
47 | 44 | ||
48 | int __attribute_const__ kvm_target_cpu(void); | 45 | int __attribute_const__ kvm_target_cpu(void); |
@@ -195,6 +192,7 @@ struct kvm_vm_stat { | |||
195 | 192 | ||
196 | struct kvm_vcpu_stat { | 193 | struct kvm_vcpu_stat { |
197 | u32 halt_successful_poll; | 194 | u32 halt_successful_poll; |
195 | u32 halt_attempted_poll; | ||
198 | u32 halt_wakeup; | 196 | u32 halt_wakeup; |
199 | }; | 197 | }; |
200 | 198 | ||
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 6900b2d95371..26b066690593 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -26,13 +26,9 @@ | |||
26 | * Software defined PTE bits definition. | 26 | * Software defined PTE bits definition. |
27 | */ | 27 | */ |
28 | #define PTE_VALID (_AT(pteval_t, 1) << 0) | 28 | #define PTE_VALID (_AT(pteval_t, 1) << 0) |
29 | #define PTE_WRITE (PTE_DBM) /* same as DBM (51) */ | ||
29 | #define PTE_DIRTY (_AT(pteval_t, 1) << 55) | 30 | #define PTE_DIRTY (_AT(pteval_t, 1) << 55) |
30 | #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) | 31 | #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) |
31 | #ifdef CONFIG_ARM64_HW_AFDBM | ||
32 | #define PTE_WRITE (PTE_DBM) /* same as DBM */ | ||
33 | #else | ||
34 | #define PTE_WRITE (_AT(pteval_t, 1) << 57) | ||
35 | #endif | ||
36 | #define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ | 32 | #define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ |
37 | 33 | ||
38 | /* | 34 | /* |
@@ -83,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
83 | #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) | 79 | #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) |
84 | #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) | 80 | #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) |
85 | 81 | ||
86 | #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) | 82 | #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) |
87 | #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) | 83 | #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) |
88 | #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) | 84 | #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) |
89 | #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) | 85 | #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) |
@@ -146,7 +142,7 @@ extern struct page *empty_zero_page; | |||
146 | #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) | 142 | #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) |
147 | 143 | ||
148 | #ifdef CONFIG_ARM64_HW_AFDBM | 144 | #ifdef CONFIG_ARM64_HW_AFDBM |
149 | #define pte_hw_dirty(pte) (!(pte_val(pte) & PTE_RDONLY)) | 145 | #define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY)) |
150 | #else | 146 | #else |
151 | #define pte_hw_dirty(pte) (0) | 147 | #define pte_hw_dirty(pte) (0) |
152 | #endif | 148 | #endif |
@@ -238,7 +234,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); | |||
238 | * When hardware DBM is not present, the software PTE_DIRTY bit is updated via | 234 | * When hardware DBM is not present, the software PTE_DIRTY bit is updated via |
239 | * the page fault mechanism. Checking the dirty status of a pte becomes: | 235 | * the page fault mechanism. Checking the dirty status of a pte becomes: |
240 | * | 236 | * |
241 | * PTE_DIRTY || !PTE_RDONLY | 237 | * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) |
242 | */ | 238 | */ |
243 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | 239 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, |
244 | pte_t *ptep, pte_t pte) | 240 | pte_t *ptep, pte_t pte) |
@@ -500,10 +496,10 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) | |||
500 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 496 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
501 | { | 497 | { |
502 | const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | | 498 | const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | |
503 | PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; | 499 | PTE_PROT_NONE | PTE_VALID | PTE_WRITE; |
504 | /* preserve the hardware dirty information */ | 500 | /* preserve the hardware dirty information */ |
505 | if (pte_hw_dirty(pte)) | 501 | if (pte_hw_dirty(pte)) |
506 | newprot |= PTE_DIRTY; | 502 | pte = pte_mkdirty(pte); |
507 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); | 503 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); |
508 | return pte; | 504 | return pte; |
509 | } | 505 | } |
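
A quick standalone sketch of the dirty-bit test the pgtable.h hunk above ends up with: a pte counts as dirty when the software PTE_DIRTY bit is set, or when hardware DBM has cleared PTE_RDONLY on a writable (PTE_WRITE == PTE_DBM) entry. This is plain userspace C for illustration only; the PTE_DBM and PTE_DIRTY positions mirror the hunk, while the PTE_RDONLY position and the helper names are assumptions, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID   (1ULL << 0)
#define PTE_RDONLY  (1ULL << 7)    /* assumed position, illustrative only */
#define PTE_DBM     (1ULL << 51)   /* hardware Dirty Bit Modifier */
#define PTE_WRITE   PTE_DBM        /* same as DBM, as in the hunk */
#define PTE_DIRTY   (1ULL << 55)   /* software dirty bit */

static int pte_sw_dirty(uint64_t pte)
{
	return !!(pte & PTE_DIRTY);
}

static int pte_hw_dirty(uint64_t pte)
{
	/* Writable and no longer read-only: hardware marked it dirty. */
	return (pte & PTE_WRITE) && !(pte & PTE_RDONLY);
}

static int pte_dirty(uint64_t pte)
{
	/* PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY), as in the updated comment. */
	return pte_sw_dirty(pte) || pte_hw_dirty(pte);
}

int main(void)
{
	uint64_t clean_ro = PTE_VALID | PTE_WRITE | PTE_RDONLY;
	uint64_t hw_dirty = PTE_VALID | PTE_WRITE;               /* DBM cleared RDONLY */
	uint64_t sw_dirty = PTE_VALID | PTE_RDONLY | PTE_DIRTY;

	printf("clean_ro=%d hw_dirty=%d sw_dirty=%d\n",
	       pte_dirty(clean_ro), pte_dirty(hw_dirty), pte_dirty(sw_dirty));
	return 0;
}
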
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 3bc498c250dc..41e58fe3c041 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h | |||
@@ -44,7 +44,7 @@ | |||
44 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) | 44 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) |
45 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) | 45 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) |
46 | 46 | ||
47 | #define __NR_compat_syscalls 388 | 47 | #define __NR_compat_syscalls 390 |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #define __ARCH_WANT_SYS_CLONE | 50 | #define __ARCH_WANT_SYS_CLONE |
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index cef934a90f17..5b925b761a2a 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h | |||
@@ -797,3 +797,12 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create) | |||
797 | __SYSCALL(__NR_bpf, sys_bpf) | 797 | __SYSCALL(__NR_bpf, sys_bpf) |
798 | #define __NR_execveat 387 | 798 | #define __NR_execveat 387 |
799 | __SYSCALL(__NR_execveat, compat_sys_execveat) | 799 | __SYSCALL(__NR_execveat, compat_sys_execveat) |
800 | #define __NR_userfaultfd 388 | ||
801 | __SYSCALL(__NR_userfaultfd, sys_userfaultfd) | ||
802 | #define __NR_membarrier 389 | ||
803 | __SYSCALL(__NR_membarrier, sys_membarrier) | ||
804 | |||
805 | /* | ||
806 | * Please add new compat syscalls above this comment and update | ||
807 | * __NR_compat_syscalls in asm/unistd.h. | ||
808 | */ | ||
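
The new comment asks future additions to keep __NR_compat_syscalls in asm/unistd.h in step with the table; the invariant is simply "table length = highest compat syscall number + 1". A compile-time check along these lines (a hedged sketch, not something the kernel actually carries; the numbers are copied from the hunks above) would catch a missed bump:

#include <assert.h>

#define __NR_execveat        387
#define __NR_userfaultfd     388
#define __NR_membarrier      389
#define __NR_compat_syscalls 390   /* value from the unistd.h hunk above */

static_assert(__NR_compat_syscalls == __NR_membarrier + 1,
	      "bump __NR_compat_syscalls when adding a compat syscall");

int main(void) { return 0; }
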
diff --git a/arch/arm64/include/uapi/asm/signal.h b/arch/arm64/include/uapi/asm/signal.h index 8d1e7236431b..991bf5db2ca1 100644 --- a/arch/arm64/include/uapi/asm/signal.h +++ b/arch/arm64/include/uapi/asm/signal.h | |||
@@ -19,6 +19,9 @@ | |||
19 | /* Required for AArch32 compatibility. */ | 19 | /* Required for AArch32 compatibility. */ |
20 | #define SA_RESTORER 0x04000000 | 20 | #define SA_RESTORER 0x04000000 |
21 | 21 | ||
22 | #define MINSIGSTKSZ 5120 | ||
23 | #define SIGSTKSZ 16384 | ||
24 | |||
22 | #include <asm-generic/signal.h> | 25 | #include <asm-generic/signal.h> |
23 | 26 | ||
24 | #endif | 27 | #endif |
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 9b3b62ac9c24..253021ef2769 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c | |||
@@ -134,7 +134,7 @@ static int os_lock_notify(struct notifier_block *self, | |||
134 | unsigned long action, void *data) | 134 | unsigned long action, void *data) |
135 | { | 135 | { |
136 | int cpu = (unsigned long)data; | 136 | int cpu = (unsigned long)data; |
137 | if (action == CPU_ONLINE) | 137 | if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) |
138 | smp_call_function_single(cpu, clear_os_lock, NULL, 1); | 138 | smp_call_function_single(cpu, clear_os_lock, NULL, 1); |
139 | return NOTIFY_OK; | 139 | return NOTIFY_OK; |
140 | } | 140 | } |
@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook) | |||
201 | } | 201 | } |
202 | 202 | ||
203 | /* | 203 | /* |
204 | * Call registered single step handers | 204 | * Call registered single step handlers |
205 | * There is no Syndrome info to check for determining the handler. | 205 | * There is no Syndrome info to check for determining the handler. |
206 | * So we call all the registered handlers, until the right handler is | 206 | * So we call all the registered handlers, until the right handler is |
207 | * found which returns zero. | 207 | * found which returns zero. |
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr, | |||
271 | * Use reader/writer locks instead of plain spinlock. | 271 | * Use reader/writer locks instead of plain spinlock. |
272 | */ | 272 | */ |
273 | static LIST_HEAD(break_hook); | 273 | static LIST_HEAD(break_hook); |
274 | static DEFINE_RWLOCK(break_hook_lock); | 274 | static DEFINE_SPINLOCK(break_hook_lock); |
275 | 275 | ||
276 | void register_break_hook(struct break_hook *hook) | 276 | void register_break_hook(struct break_hook *hook) |
277 | { | 277 | { |
278 | write_lock(&break_hook_lock); | 278 | spin_lock(&break_hook_lock); |
279 | list_add(&hook->node, &break_hook); | 279 | list_add_rcu(&hook->node, &break_hook); |
280 | write_unlock(&break_hook_lock); | 280 | spin_unlock(&break_hook_lock); |
281 | } | 281 | } |
282 | 282 | ||
283 | void unregister_break_hook(struct break_hook *hook) | 283 | void unregister_break_hook(struct break_hook *hook) |
284 | { | 284 | { |
285 | write_lock(&break_hook_lock); | 285 | spin_lock(&break_hook_lock); |
286 | list_del(&hook->node); | 286 | list_del_rcu(&hook->node); |
287 | write_unlock(&break_hook_lock); | 287 | spin_unlock(&break_hook_lock); |
288 | synchronize_rcu(); | ||
288 | } | 289 | } |
289 | 290 | ||
290 | static int call_break_hook(struct pt_regs *regs, unsigned int esr) | 291 | static int call_break_hook(struct pt_regs *regs, unsigned int esr) |
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) | |||
292 | struct break_hook *hook; | 293 | struct break_hook *hook; |
293 | int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; | 294 | int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; |
294 | 295 | ||
295 | read_lock(&break_hook_lock); | 296 | rcu_read_lock(); |
296 | list_for_each_entry(hook, &break_hook, node) | 297 | list_for_each_entry_rcu(hook, &break_hook, node) |
297 | if ((esr & hook->esr_mask) == hook->esr_val) | 298 | if ((esr & hook->esr_mask) == hook->esr_val) |
298 | fn = hook->fn; | 299 | fn = hook->fn; |
299 | read_unlock(&break_hook_lock); | 300 | rcu_read_unlock(); |
300 | 301 | ||
301 | return fn ? fn(regs, esr) : DBG_HOOK_ERROR; | 302 | return fn ? fn(regs, esr) : DBG_HOOK_ERROR; |
302 | } | 303 | } |
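
The notifier change at the top of this file (and the identical one in hw_breakpoint.c below) masks off CPU_TASKS_FROZEN so the handler also runs for CPU_ONLINE_FROZEN, the variant reported while tasks are frozen during suspend/resume. A minimal userspace illustration; the action values mirror include/linux/cpu.h of that era, everything else is invented for the demo:

#include <stdio.h>

#define CPU_ONLINE        0x0002
#define CPU_TASKS_FROZEN  0x0010
#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)

static int old_check(unsigned long action)
{
	/* Old test: misses the resume path entirely. */
	return action == CPU_ONLINE;
}

static int new_check(unsigned long action)
{
	/* New test: one comparison covers both the normal and _FROZEN event. */
	return (action & ~CPU_TASKS_FROZEN) == CPU_ONLINE;
}

int main(void)
{
	printf("CPU_ONLINE:        old=%d new=%d\n",
	       old_check(CPU_ONLINE), new_check(CPU_ONLINE));
	printf("CPU_ONLINE_FROZEN: old=%d new=%d\n",
	       old_check(CPU_ONLINE_FROZEN), new_check(CPU_ONLINE_FROZEN));
	return 0;
}

Masking the _FROZEN bit is the usual idiom for letting one hotplug notifier case handle both the runtime and the suspend/resume variants of an event.
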
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index e8ca6eaedd02..13671a9cf016 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c | |||
@@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void) | |||
258 | */ | 258 | */ |
259 | if (!is_normal_ram(md)) | 259 | if (!is_normal_ram(md)) |
260 | prot = __pgprot(PROT_DEVICE_nGnRE); | 260 | prot = __pgprot(PROT_DEVICE_nGnRE); |
261 | else if (md->type == EFI_RUNTIME_SERVICES_CODE) | 261 | else if (md->type == EFI_RUNTIME_SERVICES_CODE || |
262 | !PAGE_ALIGNED(md->phys_addr)) | ||
262 | prot = PAGE_KERNEL_EXEC; | 263 | prot = PAGE_KERNEL_EXEC; |
263 | else | 264 | else |
264 | prot = PAGE_KERNEL; | 265 | prot = PAGE_KERNEL; |
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 08cafc518b9a..0f03a8fe2314 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S | |||
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub) | |||
178 | ENDPROC(ftrace_stub) | 178 | ENDPROC(ftrace_stub) |
179 | 179 | ||
180 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 180 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
181 | /* save return value regs*/ | ||
182 | .macro save_return_regs | ||
183 | sub sp, sp, #64 | ||
184 | stp x0, x1, [sp] | ||
185 | stp x2, x3, [sp, #16] | ||
186 | stp x4, x5, [sp, #32] | ||
187 | stp x6, x7, [sp, #48] | ||
188 | .endm | ||
189 | |||
190 | /* restore return value regs*/ | ||
191 | .macro restore_return_regs | ||
192 | ldp x0, x1, [sp] | ||
193 | ldp x2, x3, [sp, #16] | ||
194 | ldp x4, x5, [sp, #32] | ||
195 | ldp x6, x7, [sp, #48] | ||
196 | add sp, sp, #64 | ||
197 | .endm | ||
198 | |||
181 | /* | 199 | /* |
182 | * void ftrace_graph_caller(void) | 200 | * void ftrace_graph_caller(void) |
183 | * | 201 | * |
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller) | |||
204 | * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled. | 222 | * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled. |
205 | */ | 223 | */ |
206 | ENTRY(return_to_handler) | 224 | ENTRY(return_to_handler) |
207 | str x0, [sp, #-16]! | 225 | save_return_regs |
208 | mov x0, x29 // parent's fp | 226 | mov x0, x29 // parent's fp |
209 | bl ftrace_return_to_handler// addr = ftrace_return_to_handler(fp); | 227 | bl ftrace_return_to_handler// addr = ftrace_return_to_handler(fp); |
210 | mov x30, x0 // restore the original return address | 228 | mov x30, x0 // restore the original return address |
211 | ldr x0, [sp], #16 | 229 | restore_return_regs |
212 | ret | 230 | ret |
213 | END(return_to_handler) | 231 | END(return_to_handler) |
214 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 232 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a055be6125cf..90d09eddd5b2 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -523,6 +523,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems | |||
523 | msr hstr_el2, xzr // Disable CP15 traps to EL2 | 523 | msr hstr_el2, xzr // Disable CP15 traps to EL2 |
524 | #endif | 524 | #endif |
525 | 525 | ||
526 | /* EL2 debug */ | ||
527 | mrs x0, pmcr_el0 // Disable debug access traps | ||
528 | ubfx x0, x0, #11, #5 // to EL2 and allow access to | ||
529 | msr mdcr_el2, x0 // all PMU counters from EL1 | ||
530 | |||
526 | /* Stage-2 translation */ | 531 | /* Stage-2 translation */ |
527 | msr vttbr_el2, xzr | 532 | msr vttbr_el2, xzr |
528 | 533 | ||
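
The three instructions added to head.S read PMCR_EL0, extract the N field (bits [15:11], the number of implemented PMU counters) and write that value to MDCR_EL2, so HPMN covers every counter while the debug/PMU trap bits end up clear. A userspace sketch of the ubfx step, with an invented counter count:

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the ubfx instruction: extract 'width' bits at 'lsb'. */
static uint64_t ubfx(uint64_t val, unsigned lsb, unsigned width)
{
	return (val >> lsb) & ((1ULL << width) - 1);
}

int main(void)
{
	uint64_t pmcr_el0 = 6ULL << 11;            /* pretend PMCR_EL0.N == 6 */
	uint64_t mdcr_el2 = ubfx(pmcr_el0, 11, 5); /* HPMN = 6, trap bits clear */

	printf("MDCR_EL2 = %#llx\n", (unsigned long long)mdcr_el2);
	return 0;
}
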
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index c97040ecf838..bba85c8f8037 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c | |||
@@ -872,7 +872,7 @@ static int hw_breakpoint_reset_notify(struct notifier_block *self, | |||
872 | void *hcpu) | 872 | void *hcpu) |
873 | { | 873 | { |
874 | int cpu = (long)hcpu; | 874 | int cpu = (long)hcpu; |
875 | if (action == CPU_ONLINE) | 875 | if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) |
876 | smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1); | 876 | smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1); |
877 | return NOTIFY_OK; | 877 | return NOTIFY_OK; |
878 | } | 878 | } |
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index f341866aa810..c08b9ad6f429 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c | |||
@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn) | |||
85 | aarch64_insn_is_bcond(insn)); | 85 | aarch64_insn_is_bcond(insn)); |
86 | } | 86 | } |
87 | 87 | ||
88 | static DEFINE_SPINLOCK(patch_lock); | 88 | static DEFINE_RAW_SPINLOCK(patch_lock); |
89 | 89 | ||
90 | static void __kprobes *patch_map(void *addr, int fixmap) | 90 | static void __kprobes *patch_map(void *addr, int fixmap) |
91 | { | 91 | { |
@@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn) | |||
131 | unsigned long flags = 0; | 131 | unsigned long flags = 0; |
132 | int ret; | 132 | int ret; |
133 | 133 | ||
134 | spin_lock_irqsave(&patch_lock, flags); | 134 | raw_spin_lock_irqsave(&patch_lock, flags); |
135 | waddr = patch_map(addr, FIX_TEXT_POKE0); | 135 | waddr = patch_map(addr, FIX_TEXT_POKE0); |
136 | 136 | ||
137 | ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE); | 137 | ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE); |
138 | 138 | ||
139 | patch_unmap(FIX_TEXT_POKE0); | 139 | patch_unmap(FIX_TEXT_POKE0); |
140 | spin_unlock_irqrestore(&patch_lock, flags); | 140 | raw_spin_unlock_irqrestore(&patch_lock, flags); |
141 | 141 | ||
142 | return ret; | 142 | return ret; |
143 | } | 143 | } |
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 67bf4107f6ef..876eb8df50bf 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c | |||
@@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
332 | ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, | 332 | ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, |
333 | AARCH64_INSN_IMM_ADR); | 333 | AARCH64_INSN_IMM_ADR); |
334 | break; | 334 | break; |
335 | #ifndef CONFIG_ARM64_ERRATUM_843419 | ||
335 | case R_AARCH64_ADR_PREL_PG_HI21_NC: | 336 | case R_AARCH64_ADR_PREL_PG_HI21_NC: |
336 | overflow_check = false; | 337 | overflow_check = false; |
337 | case R_AARCH64_ADR_PREL_PG_HI21: | 338 | case R_AARCH64_ADR_PREL_PG_HI21: |
338 | ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, | 339 | ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, |
339 | AARCH64_INSN_IMM_ADR); | 340 | AARCH64_INSN_IMM_ADR); |
340 | break; | 341 | break; |
342 | #endif | ||
341 | case R_AARCH64_ADD_ABS_LO12_NC: | 343 | case R_AARCH64_ADD_ABS_LO12_NC: |
342 | case R_AARCH64_LDST8_ABS_LO12_NC: | 344 | case R_AARCH64_LDST8_ABS_LO12_NC: |
343 | overflow_check = false; | 345 | overflow_check = false; |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 6bab21f84a9f..232247945b1c 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -364,6 +364,8 @@ static void __init relocate_initrd(void) | |||
364 | to_free = ram_end - orig_start; | 364 | to_free = ram_end - orig_start; |
365 | 365 | ||
366 | size = orig_end - orig_start; | 366 | size = orig_end - orig_start; |
367 | if (!size) | ||
368 | return; | ||
367 | 369 | ||
368 | /* initrd needs to be relocated completely inside linear mapping */ | 370 | /* initrd needs to be relocated completely inside linear mapping */ |
369 | new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn), | 371 | new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn), |
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 948f0ad2de23..71ef6dc89ae5 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | |||
212 | 212 | ||
213 | /* | 213 | /* |
214 | * VFP save/restore code. | 214 | * VFP save/restore code. |
215 | * | ||
216 | * We have to be careful with endianness, since the fpsimd context-switch | ||
217 | * code operates on 128-bit (Q) register values whereas the compat ABI | ||
218 | * uses an array of 64-bit (D) registers. Consequently, we need to swap | ||
219 | * the two halves of each Q register when running on a big-endian CPU. | ||
215 | */ | 220 | */ |
221 | union __fpsimd_vreg { | ||
222 | __uint128_t raw; | ||
223 | struct { | ||
224 | #ifdef __AARCH64EB__ | ||
225 | u64 hi; | ||
226 | u64 lo; | ||
227 | #else | ||
228 | u64 lo; | ||
229 | u64 hi; | ||
230 | #endif | ||
231 | }; | ||
232 | }; | ||
233 | |||
216 | static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) | 234 | static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) |
217 | { | 235 | { |
218 | struct fpsimd_state *fpsimd = ¤t->thread.fpsimd_state; | 236 | struct fpsimd_state *fpsimd = ¤t->thread.fpsimd_state; |
219 | compat_ulong_t magic = VFP_MAGIC; | 237 | compat_ulong_t magic = VFP_MAGIC; |
220 | compat_ulong_t size = VFP_STORAGE_SIZE; | 238 | compat_ulong_t size = VFP_STORAGE_SIZE; |
221 | compat_ulong_t fpscr, fpexc; | 239 | compat_ulong_t fpscr, fpexc; |
222 | int err = 0; | 240 | int i, err = 0; |
223 | 241 | ||
224 | /* | 242 | /* |
225 | * Save the hardware registers to the fpsimd_state structure. | 243 | * Save the hardware registers to the fpsimd_state structure. |
@@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) | |||
235 | /* | 253 | /* |
236 | * Now copy the FP registers. Since the registers are packed, | 254 | * Now copy the FP registers. Since the registers are packed, |
237 | * we can copy the prefix we want (V0-V15) as it is. | 255 | * we can copy the prefix we want (V0-V15) as it is. |
238 | * FIXME: Won't work if big endian. | ||
239 | */ | 256 | */ |
240 | err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs, | 257 | for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { |
241 | sizeof(frame->ufp.fpregs)); | 258 | union __fpsimd_vreg vreg = { |
259 | .raw = fpsimd->vregs[i >> 1], | ||
260 | }; | ||
261 | |||
262 | __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err); | ||
263 | __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); | ||
264 | } | ||
242 | 265 | ||
243 | /* Create an AArch32 fpscr from the fpsr and the fpcr. */ | 266 | /* Create an AArch32 fpscr from the fpsr and the fpcr. */ |
244 | fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) | | 267 | fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) | |
@@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) | |||
263 | compat_ulong_t magic = VFP_MAGIC; | 286 | compat_ulong_t magic = VFP_MAGIC; |
264 | compat_ulong_t size = VFP_STORAGE_SIZE; | 287 | compat_ulong_t size = VFP_STORAGE_SIZE; |
265 | compat_ulong_t fpscr; | 288 | compat_ulong_t fpscr; |
266 | int err = 0; | 289 | int i, err = 0; |
267 | 290 | ||
268 | __get_user_error(magic, &frame->magic, err); | 291 | __get_user_error(magic, &frame->magic, err); |
269 | __get_user_error(size, &frame->size, err); | 292 | __get_user_error(size, &frame->size, err); |
@@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) | |||
273 | if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) | 296 | if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) |
274 | return -EINVAL; | 297 | return -EINVAL; |
275 | 298 | ||
276 | /* | 299 | /* Copy the FP registers into the start of the fpsimd_state. */ |
277 | * Copy the FP registers into the start of the fpsimd_state. | 300 | for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { |
278 | * FIXME: Won't work if big endian. | 301 | union __fpsimd_vreg vreg; |
279 | */ | 302 | |
280 | err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs, | 303 | __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err); |
281 | sizeof(frame->ufp.fpregs)); | 304 | __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); |
305 | fpsimd.vregs[i >> 1] = vreg.raw; | ||
306 | } | ||
282 | 307 | ||
283 | /* Extract the fpsr and the fpcr from the fpscr */ | 308 | /* Extract the fpsr and the fpcr from the fpscr */ |
284 | __get_user_error(fpscr, &frame->ufp.fpscr, err); | 309 | __get_user_error(fpscr, &frame->ufp.fpscr, err); |
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index bfffe8f4bd53..5c7e920e4861 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig | |||
@@ -41,15 +41,4 @@ config KVM_ARM_HOST | |||
41 | ---help--- | 41 | ---help--- |
42 | Provides host support for ARM processors. | 42 | Provides host support for ARM processors. |
43 | 43 | ||
44 | config KVM_ARM_MAX_VCPUS | ||
45 | int "Number maximum supported virtual CPUs per VM" | ||
46 | depends on KVM_ARM_HOST | ||
47 | default 4 | ||
48 | help | ||
49 | Static number of max supported virtual CPUs per VM. | ||
50 | |||
51 | If you choose a high number, the vcpu structures will be quite | ||
52 | large, so only choose a reasonable number that you expect to | ||
53 | actually use. | ||
54 | |||
55 | endif # VIRTUALIZATION | 44 | endif # VIRTUALIZATION |
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 37c89ea2c572..e5836138ec42 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S | |||
@@ -433,20 +433,13 @@ | |||
433 | mrs x5, ifsr32_el2 | 433 | mrs x5, ifsr32_el2 |
434 | stp x4, x5, [x3] | 434 | stp x4, x5, [x3] |
435 | 435 | ||
436 | skip_fpsimd_state x8, 3f | 436 | skip_fpsimd_state x8, 2f |
437 | mrs x6, fpexc32_el2 | 437 | mrs x6, fpexc32_el2 |
438 | str x6, [x3, #16] | 438 | str x6, [x3, #16] |
439 | 3: | 439 | 2: |
440 | skip_debug_state x8, 2f | 440 | skip_debug_state x8, 1f |
441 | mrs x7, dbgvcr32_el2 | 441 | mrs x7, dbgvcr32_el2 |
442 | str x7, [x3, #24] | 442 | str x7, [x3, #24] |
443 | 2: | ||
444 | skip_tee_state x8, 1f | ||
445 | |||
446 | add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) | ||
447 | mrs x4, teecr32_el1 | ||
448 | mrs x5, teehbr32_el1 | ||
449 | stp x4, x5, [x3] | ||
450 | 1: | 443 | 1: |
451 | .endm | 444 | .endm |
452 | 445 | ||
@@ -466,16 +459,9 @@ | |||
466 | msr dacr32_el2, x4 | 459 | msr dacr32_el2, x4 |
467 | msr ifsr32_el2, x5 | 460 | msr ifsr32_el2, x5 |
468 | 461 | ||
469 | skip_debug_state x8, 2f | 462 | skip_debug_state x8, 1f |
470 | ldr x7, [x3, #24] | 463 | ldr x7, [x3, #24] |
471 | msr dbgvcr32_el2, x7 | 464 | msr dbgvcr32_el2, x7 |
472 | 2: | ||
473 | skip_tee_state x8, 1f | ||
474 | |||
475 | add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) | ||
476 | ldp x4, x5, [x3] | ||
477 | msr teecr32_el1, x4 | ||
478 | msr teehbr32_el1, x5 | ||
479 | 1: | 465 | 1: |
480 | .endm | 466 | .endm |
481 | 467 | ||
@@ -570,8 +556,6 @@ alternative_endif | |||
570 | mrs x3, cntv_ctl_el0 | 556 | mrs x3, cntv_ctl_el0 |
571 | and x3, x3, #3 | 557 | and x3, x3, #3 |
572 | str w3, [x0, #VCPU_TIMER_CNTV_CTL] | 558 | str w3, [x0, #VCPU_TIMER_CNTV_CTL] |
573 | bic x3, x3, #1 // Clear Enable | ||
574 | msr cntv_ctl_el0, x3 | ||
575 | 559 | ||
576 | isb | 560 | isb |
577 | 561 | ||
@@ -579,6 +563,9 @@ alternative_endif | |||
579 | str x3, [x0, #VCPU_TIMER_CNTV_CVAL] | 563 | str x3, [x0, #VCPU_TIMER_CNTV_CVAL] |
580 | 564 | ||
581 | 1: | 565 | 1: |
566 | // Disable the virtual timer | ||
567 | msr cntv_ctl_el0, xzr | ||
568 | |||
582 | // Allow physical timer/counter access for the host | 569 | // Allow physical timer/counter access for the host |
583 | mrs x2, cnthctl_el2 | 570 | mrs x2, cnthctl_el2 |
584 | orr x2, x2, #3 | 571 | orr x2, x2, #3 |
@@ -753,6 +740,9 @@ ENTRY(__kvm_vcpu_run) | |||
753 | // Guest context | 740 | // Guest context |
754 | add x2, x0, #VCPU_CONTEXT | 741 | add x2, x0, #VCPU_CONTEXT |
755 | 742 | ||
743 | // We must restore the 32-bit state before the sysregs, thanks | ||
744 | // to Cortex-A57 erratum #852523. | ||
745 | restore_guest_32bit_state | ||
756 | bl __restore_sysregs | 746 | bl __restore_sysregs |
757 | 747 | ||
758 | skip_debug_state x3, 1f | 748 | skip_debug_state x3, 1f |
@@ -760,7 +750,6 @@ ENTRY(__kvm_vcpu_run) | |||
760 | kern_hyp_va x3 | 750 | kern_hyp_va x3 |
761 | bl __restore_debug | 751 | bl __restore_debug |
762 | 1: | 752 | 1: |
763 | restore_guest_32bit_state | ||
764 | restore_guest_regs | 753 | restore_guest_regs |
765 | 754 | ||
766 | // That's it, no more messing around. | 755 | // That's it, no more messing around. |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index b41607d270ac..d03d3af17e7e 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -272,7 +272,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | |||
272 | { | 272 | { |
273 | __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; | 273 | __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; |
274 | 274 | ||
275 | if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) | 275 | if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) |
276 | return -EFAULT; | 276 | return -EFAULT; |
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
@@ -314,7 +314,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | |||
314 | { | 314 | { |
315 | __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; | 315 | __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; |
316 | 316 | ||
317 | if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) | 317 | if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) |
318 | return -EFAULT; | 318 | return -EFAULT; |
319 | 319 | ||
320 | return 0; | 320 | return 0; |
@@ -358,7 +358,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | |||
358 | { | 358 | { |
359 | __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; | 359 | __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; |
360 | 360 | ||
361 | if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) | 361 | if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) |
362 | return -EFAULT; | 362 | return -EFAULT; |
363 | return 0; | 363 | return 0; |
364 | } | 364 | } |
@@ -400,7 +400,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | |||
400 | { | 400 | { |
401 | __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; | 401 | __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; |
402 | 402 | ||
403 | if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) | 403 | if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) |
404 | return -EFAULT; | 404 | return -EFAULT; |
405 | return 0; | 405 | return 0; |
406 | } | 406 | } |
@@ -539,13 +539,6 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
539 | { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110), | 539 | { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110), |
540 | trap_dbgauthstatus_el1 }, | 540 | trap_dbgauthstatus_el1 }, |
541 | 541 | ||
542 | /* TEECR32_EL1 */ | ||
543 | { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), | ||
544 | NULL, reset_val, TEECR32_EL1, 0 }, | ||
545 | /* TEEHBR32_EL1 */ | ||
546 | { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000), | ||
547 | NULL, reset_val, TEEHBR32_EL1, 0 }, | ||
548 | |||
549 | /* MDCCSR_EL1 */ | 542 | /* MDCCSR_EL1 */ |
550 | { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000), | 543 | { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000), |
551 | trap_raz_wi }, | 544 | trap_raz_wi }, |
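
The four set_{bvr,bcr,wvr,wcr} fixes above are all the same one-liner: copy_from_user() takes (kernel destination, user source, size), exactly like memcpy(), and the swapped arguments were writing the stale kernel value over the user buffer instead of importing the new one. A userspace sketch of the corrected shape, with memcpy standing in for the uaccess helper (the mock and the names are inventions for the demo):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_from_user(): (destination, source, size), 0 on success. */
static unsigned long mock_copy_from_user(void *dst, const void *src,
					 unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

static uint64_t dbg_bvr;   /* stands in for vcpu_debug_state.dbg_bvr[rd->reg] */

static int set_bvr(const void *uaddr, unsigned long size)
{
	/* Kernel pointer first: import the user-supplied value. */
	if (mock_copy_from_user(&dbg_bvr, uaddr, size) != 0)
		return -1;
	return 0;
}

int main(void)
{
	uint64_t user_val = 0xdeadbeef;

	set_bvr(&user_val, sizeof(user_val));
	printf("dbg_bvr = %llx\n", (unsigned long long)dbg_bvr);
	return 0;
}
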
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 0bcc4bc94b4a..99224dcebdc5 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -100,7 +100,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, | |||
100 | if (IS_ENABLED(CONFIG_ZONE_DMA) && | 100 | if (IS_ENABLED(CONFIG_ZONE_DMA) && |
101 | dev->coherent_dma_mask <= DMA_BIT_MASK(32)) | 101 | dev->coherent_dma_mask <= DMA_BIT_MASK(32)) |
102 | flags |= GFP_DMA; | 102 | flags |= GFP_DMA; |
103 | if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { | 103 | if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) { |
104 | struct page *page; | 104 | struct page *page; |
105 | void *addr; | 105 | void *addr; |
106 | 106 | ||
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index aba9ead1384c..9fadf6d7039b 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -287,6 +287,7 @@ retry: | |||
287 | * starvation. | 287 | * starvation. |
288 | */ | 288 | */ |
289 | mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; | 289 | mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; |
290 | mm_flags |= FAULT_FLAG_TRIED; | ||
290 | goto retry; | 291 | goto retry; |
291 | } | 292 | } |
292 | } | 293 | } |
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index f61f2dd67464..241b9b9729d8 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild | |||
@@ -20,4 +20,5 @@ generic-y += sections.h | |||
20 | generic-y += topology.h | 20 | generic-y += topology.h |
21 | generic-y += trace_clock.h | 21 | generic-y += trace_clock.h |
22 | generic-y += vga.h | 22 | generic-y += vga.h |
23 | generic-y += word-at-a-time.h | ||
23 | generic-y += xor.h | 24 | generic-y += xor.h |
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c index d51ff8f1c541..96cabad68489 100644 --- a/arch/avr32/mach-at32ap/extint.c +++ b/arch/avr32/mach-at32ap/extint.c | |||
@@ -144,7 +144,7 @@ static struct irq_chip eic_chip = { | |||
144 | .irq_set_type = eic_set_irq_type, | 144 | .irq_set_type = eic_set_irq_type, |
145 | }; | 145 | }; |
146 | 146 | ||
147 | static void demux_eic_irq(unsigned int irq, struct irq_desc *desc) | 147 | static void demux_eic_irq(struct irq_desc *desc) |
148 | { | 148 | { |
149 | struct eic *eic = irq_desc_get_handler_data(desc); | 149 | struct eic *eic = irq_desc_get_handler_data(desc); |
150 | unsigned long status, pending; | 150 | unsigned long status, pending; |
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c index 157a5e0e789f..4f61378c3453 100644 --- a/arch/avr32/mach-at32ap/pio.c +++ b/arch/avr32/mach-at32ap/pio.c | |||
@@ -281,7 +281,7 @@ static struct irq_chip gpio_irqchip = { | |||
281 | .irq_set_type = gpio_irq_type, | 281 | .irq_set_type = gpio_irq_type, |
282 | }; | 282 | }; |
283 | 283 | ||
284 | static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) | 284 | static void gpio_irq_handler(struct irq_desc *desc) |
285 | { | 285 | { |
286 | struct pio_device *pio = irq_desc_get_chip_data(desc); | 286 | struct pio_device *pio = irq_desc_get_chip_data(desc); |
287 | unsigned gpio_irq; | 287 | unsigned gpio_irq; |
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 61cd1e786a14..91d49c0a3118 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild | |||
@@ -46,4 +46,5 @@ generic-y += types.h | |||
46 | generic-y += ucontext.h | 46 | generic-y += ucontext.h |
47 | generic-y += unaligned.h | 47 | generic-y += unaligned.h |
48 | generic-y += user.h | 48 | generic-y += user.h |
49 | generic-y += word-at-a-time.h | ||
49 | generic-y += xor.h | 50 | generic-y += xor.h |
diff --git a/arch/blackfin/include/asm/irq_handler.h b/arch/blackfin/include/asm/irq_handler.h index 4b2a992794d7..d2f90c72378e 100644 --- a/arch/blackfin/include/asm/irq_handler.h +++ b/arch/blackfin/include/asm/irq_handler.h | |||
@@ -60,7 +60,7 @@ extern void bfin_internal_mask_irq(unsigned int irq); | |||
60 | extern void bfin_internal_unmask_irq(unsigned int irq); | 60 | extern void bfin_internal_unmask_irq(unsigned int irq); |
61 | 61 | ||
62 | struct irq_desc; | 62 | struct irq_desc; |
63 | extern void bfin_demux_mac_status_irq(unsigned int, struct irq_desc *); | 63 | extern void bfin_demux_mac_status_irq(struct irq_desc *); |
64 | extern void bfin_demux_gpio_irq(unsigned int, struct irq_desc *); | 64 | extern void bfin_demux_gpio_irq(struct irq_desc *); |
65 | 65 | ||
66 | #endif | 66 | #endif |
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index 0ba25764b8c0..052cde5ed2e4 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c | |||
@@ -107,7 +107,7 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs) | |||
107 | * than crashing, do something sensible. | 107 | * than crashing, do something sensible. |
108 | */ | 108 | */ |
109 | if (irq >= NR_IRQS) | 109 | if (irq >= NR_IRQS) |
110 | handle_bad_irq(irq, &bad_irq_desc); | 110 | handle_bad_irq(&bad_irq_desc); |
111 | else | 111 | else |
112 | generic_handle_irq(irq); | 112 | generic_handle_irq(irq); |
113 | 113 | ||
diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c index 14b2f74554dc..a48baae4384d 100644 --- a/arch/blackfin/mach-bf537/ints-priority.c +++ b/arch/blackfin/mach-bf537/ints-priority.c | |||
@@ -89,8 +89,7 @@ static struct irq_chip bf537_generic_error_irqchip = { | |||
89 | .irq_unmask = bf537_generic_error_unmask_irq, | 89 | .irq_unmask = bf537_generic_error_unmask_irq, |
90 | }; | 90 | }; |
91 | 91 | ||
92 | static void bf537_demux_error_irq(unsigned int int_err_irq, | 92 | static void bf537_demux_error_irq(struct irq_desc *inta_desc) |
93 | struct irq_desc *inta_desc) | ||
94 | { | 93 | { |
95 | int irq = 0; | 94 | int irq = 0; |
96 | 95 | ||
@@ -182,15 +181,12 @@ static struct irq_chip bf537_mac_rx_irqchip = { | |||
182 | .irq_unmask = bf537_mac_rx_unmask_irq, | 181 | .irq_unmask = bf537_mac_rx_unmask_irq, |
183 | }; | 182 | }; |
184 | 183 | ||
185 | static void bf537_demux_mac_rx_irq(unsigned int __int_irq, | 184 | static void bf537_demux_mac_rx_irq(struct irq_desc *desc) |
186 | struct irq_desc *desc) | ||
187 | { | 185 | { |
188 | unsigned int int_irq = irq_desc_get_irq(desc); | ||
189 | |||
190 | if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR)) | 186 | if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR)) |
191 | bfin_handle_irq(IRQ_MAC_RX); | 187 | bfin_handle_irq(IRQ_MAC_RX); |
192 | else | 188 | else |
193 | bfin_demux_gpio_irq(int_irq, desc); | 189 | bfin_demux_gpio_irq(desc); |
194 | } | 190 | } |
195 | #endif | 191 | #endif |
196 | 192 | ||
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index a6d1b03cdf36..e8d4d748d0fd 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c | |||
@@ -656,8 +656,7 @@ static struct irq_chip bfin_mac_status_irqchip = { | |||
656 | .irq_set_wake = bfin_mac_status_set_wake, | 656 | .irq_set_wake = bfin_mac_status_set_wake, |
657 | }; | 657 | }; |
658 | 658 | ||
659 | void bfin_demux_mac_status_irq(unsigned int int_err_irq, | 659 | void bfin_demux_mac_status_irq(struct irq_desc *inta_desc) |
660 | struct irq_desc *inta_desc) | ||
661 | { | 660 | { |
662 | int i, irq = 0; | 661 | int i, irq = 0; |
663 | u32 status = bfin_read_EMAC_SYSTAT(); | 662 | u32 status = bfin_read_EMAC_SYSTAT(); |
@@ -825,7 +824,7 @@ static void bfin_demux_gpio_block(unsigned int irq) | |||
825 | } | 824 | } |
826 | } | 825 | } |
827 | 826 | ||
828 | void bfin_demux_gpio_irq(unsigned int __inta_irq, struct irq_desc *desc) | 827 | void bfin_demux_gpio_irq(struct irq_desc *desc) |
829 | { | 828 | { |
830 | unsigned int inta_irq = irq_desc_get_irq(desc); | 829 | unsigned int inta_irq = irq_desc_get_irq(desc); |
831 | unsigned int irq; | 830 | unsigned int irq; |
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index f17c4dc6050c..945544ec603e 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild | |||
@@ -59,4 +59,5 @@ generic-y += types.h | |||
59 | generic-y += ucontext.h | 59 | generic-y += ucontext.h |
60 | generic-y += user.h | 60 | generic-y += user.h |
61 | generic-y += vga.h | 61 | generic-y += vga.h |
62 | generic-y += word-at-a-time.h | ||
62 | generic-y += xor.h | 63 | generic-y += xor.h |
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c index d487698e978a..ddcb45d7dfa7 100644 --- a/arch/c6x/platforms/megamod-pic.c +++ b/arch/c6x/platforms/megamod-pic.c | |||
@@ -93,7 +93,7 @@ static struct irq_chip megamod_chip = { | |||
93 | .irq_unmask = unmask_megamod, | 93 | .irq_unmask = unmask_megamod, |
94 | }; | 94 | }; |
95 | 95 | ||
96 | static void megamod_irq_cascade(unsigned int __irq, struct irq_desc *desc) | 96 | static void megamod_irq_cascade(struct irq_desc *desc) |
97 | { | 97 | { |
98 | struct megamod_cascade_data *cascade; | 98 | struct megamod_cascade_data *cascade; |
99 | struct megamod_pic *pic; | 99 | struct megamod_pic *pic; |
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index b7f68192d15b..1778805f6380 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild | |||
@@ -43,4 +43,5 @@ generic-y += topology.h | |||
43 | generic-y += trace_clock.h | 43 | generic-y += trace_clock.h |
44 | generic-y += types.h | 44 | generic-y += types.h |
45 | generic-y += vga.h | 45 | generic-y += vga.h |
46 | generic-y += word-at-a-time.h | ||
46 | generic-y += xor.h | 47 | generic-y += xor.h |
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 8e47b832cc76..1fa084cf1a43 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild | |||
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h | |||
7 | generic-y += mm-arch-hooks.h | 7 | generic-y += mm-arch-hooks.h |
8 | generic-y += preempt.h | 8 | generic-y += preempt.h |
9 | generic-y += trace_clock.h | 9 | generic-y += trace_clock.h |
10 | generic-y += word-at-a-time.h | ||
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c index f9c86c475bbd..f211839e2cae 100644 --- a/arch/frv/mb93090-mb00/pci-vdk.c +++ b/arch/frv/mb93090-mb00/pci-vdk.c | |||
@@ -294,6 +294,8 @@ void pcibios_fixup_bus(struct pci_bus *bus) | |||
294 | printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); | 294 | printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); |
295 | #endif | 295 | #endif |
296 | 296 | ||
297 | pci_read_bridge_bases(bus); | ||
298 | |||
297 | if (bus->number == 0) { | 299 | if (bus->number == 0) { |
298 | struct pci_dev *dev; | 300 | struct pci_dev *dev; |
299 | list_for_each_entry(dev, &bus->devices, bus_list) { | 301 | list_for_each_entry(dev, &bus->devices, bus_list) { |
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 70e6ae1e7006..373cb23301e3 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild | |||
@@ -73,4 +73,5 @@ generic-y += uaccess.h | |||
73 | generic-y += ucontext.h | 73 | generic-y += ucontext.h |
74 | generic-y += unaligned.h | 74 | generic-y += unaligned.h |
75 | generic-y += vga.h | 75 | generic-y += vga.h |
76 | generic-y += word-at-a-time.h | ||
76 | generic-y += xor.h | 77 | generic-y += xor.h |
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index daee37bd0999..db8ddabc6bd2 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild | |||
@@ -58,4 +58,5 @@ generic-y += types.h | |||
58 | generic-y += ucontext.h | 58 | generic-y += ucontext.h |
59 | generic-y += unaligned.h | 59 | generic-y += unaligned.h |
60 | generic-y += vga.h | 60 | generic-y += vga.h |
61 | generic-y += word-at-a-time.h | ||
61 | generic-y += xor.h | 62 | generic-y += xor.h |
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 9de3ba12f6b9..502a91d8dbbd 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild | |||
@@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h | |||
8 | generic-y += preempt.h | 8 | generic-y += preempt.h |
9 | generic-y += trace_clock.h | 9 | generic-y += trace_clock.h |
10 | generic-y += vtime.h | 10 | generic-y += vtime.h |
11 | generic-y += word-at-a-time.h | ||
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 95c39b95e97e..99c96a5e6016 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | 12 | ||
13 | 13 | ||
14 | #define NR_syscalls 319 /* length of syscall table */ | 14 | #define NR_syscalls 321 /* length of syscall table */ |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * The following defines stop scripts/checksyscalls.sh from complaining about | 17 | * The following defines stop scripts/checksyscalls.sh from complaining about |
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 461079560c78..98e94e19a5a0 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h | |||
@@ -332,5 +332,7 @@ | |||
332 | #define __NR_memfd_create 1340 | 332 | #define __NR_memfd_create 1340 |
333 | #define __NR_bpf 1341 | 333 | #define __NR_bpf 1341 |
334 | #define __NR_execveat 1342 | 334 | #define __NR_execveat 1342 |
335 | #define __NR_userfaultfd 1343 | ||
336 | #define __NR_membarrier 1344 | ||
335 | 337 | ||
336 | #endif /* _UAPI_ASM_IA64_UNISTD_H */ | 338 | #endif /* _UAPI_ASM_IA64_UNISTD_H */ |
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index ae0de7bf5525..37cc7a65cd3e 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -1768,5 +1768,7 @@ sys_call_table: | |||
1768 | data8 sys_memfd_create // 1340 | 1768 | data8 sys_memfd_create // 1340 |
1769 | data8 sys_bpf | 1769 | data8 sys_bpf |
1770 | data8 sys_execveat | 1770 | data8 sys_execveat |
1771 | data8 sys_userfaultfd | ||
1772 | data8 sys_membarrier | ||
1771 | 1773 | ||
1772 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls | 1774 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls |
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index d89b6013c941..7cc3be9fa7c6 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c | |||
@@ -533,9 +533,10 @@ void pcibios_fixup_bus(struct pci_bus *b) | |||
533 | { | 533 | { |
534 | struct pci_dev *dev; | 534 | struct pci_dev *dev; |
535 | 535 | ||
536 | if (b->self) | 536 | if (b->self) { |
537 | pci_read_bridge_bases(b); | ||
537 | pcibios_fixup_bridge_resources(b->self); | 538 | pcibios_fixup_bridge_resources(b->self); |
538 | 539 | } | |
539 | list_for_each_entry(dev, &b->devices, bus_list) | 540 | list_for_each_entry(dev, &b->devices, bus_list) |
540 | pcibios_fixup_device_resources(dev); | 541 | pcibios_fixup_device_resources(dev); |
541 | platform_pci_fixup_bus(b); | 542 | platform_pci_fixup_bus(b); |
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index e0eb704ca1fa..fd104bd221ce 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild | |||
@@ -9,3 +9,4 @@ generic-y += module.h | |||
9 | generic-y += preempt.h | 9 | generic-y += preempt.h |
10 | generic-y += sections.h | 10 | generic-y += sections.h |
11 | generic-y += trace_clock.h | 11 | generic-y += trace_clock.h |
12 | generic-y += word-at-a-time.h | ||
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c index 47b5f90002ab..7ff739e94896 100644 --- a/arch/m68k/amiga/amiints.c +++ b/arch/m68k/amiga/amiints.c | |||
@@ -46,7 +46,7 @@ static struct irq_chip amiga_irq_chip = { | |||
46 | * The builtin Amiga hardware interrupt handlers. | 46 | * The builtin Amiga hardware interrupt handlers. |
47 | */ | 47 | */ |
48 | 48 | ||
49 | static void ami_int1(unsigned int irq, struct irq_desc *desc) | 49 | static void ami_int1(struct irq_desc *desc) |
50 | { | 50 | { |
51 | unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; | 51 | unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; |
52 | 52 | ||
@@ -69,7 +69,7 @@ static void ami_int1(unsigned int irq, struct irq_desc *desc) | |||
69 | } | 69 | } |
70 | } | 70 | } |
71 | 71 | ||
72 | static void ami_int3(unsigned int irq, struct irq_desc *desc) | 72 | static void ami_int3(struct irq_desc *desc) |
73 | { | 73 | { |
74 | unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; | 74 | unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; |
75 | 75 | ||
@@ -92,7 +92,7 @@ static void ami_int3(unsigned int irq, struct irq_desc *desc) | |||
92 | } | 92 | } |
93 | } | 93 | } |
94 | 94 | ||
95 | static void ami_int4(unsigned int irq, struct irq_desc *desc) | 95 | static void ami_int4(struct irq_desc *desc) |
96 | { | 96 | { |
97 | unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; | 97 | unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; |
98 | 98 | ||
@@ -121,7 +121,7 @@ static void ami_int4(unsigned int irq, struct irq_desc *desc) | |||
121 | } | 121 | } |
122 | } | 122 | } |
123 | 123 | ||
124 | static void ami_int5(unsigned int irq, struct irq_desc *desc) | 124 | static void ami_int5(struct irq_desc *desc) |
125 | { | 125 | { |
126 | unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; | 126 | unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; |
127 | 127 | ||
diff --git a/arch/m68k/coldfire/intc-5272.c b/arch/m68k/coldfire/intc-5272.c index 47371de60427..b0a19e207a63 100644 --- a/arch/m68k/coldfire/intc-5272.c +++ b/arch/m68k/coldfire/intc-5272.c | |||
@@ -143,12 +143,10 @@ static int intc_irq_set_type(struct irq_data *d, unsigned int type) | |||
143 | * We need to be careful with the masking/acking due to the side effects | 143 | * We need to be careful with the masking/acking due to the side effects |
144 | * of masking an interrupt. | 144 | * of masking an interrupt. |
145 | */ | 145 | */ |
146 | static void intc_external_irq(unsigned int __irq, struct irq_desc *desc) | 146 | static void intc_external_irq(struct irq_desc *desc) |
147 | { | 147 | { |
148 | unsigned int irq = irq_desc_get_irq(desc); | ||
149 | |||
150 | irq_desc_get_chip(desc)->irq_ack(&desc->irq_data); | 148 | irq_desc_get_chip(desc)->irq_ack(&desc->irq_data); |
151 | handle_simple_irq(irq, desc); | 149 | handle_simple_irq(desc); |
152 | } | 150 | } |
153 | 151 | ||
154 | static struct irq_chip intc_irq_chip = { | 152 | static struct irq_chip intc_irq_chip = { |
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 0b6b40d37b95..5b4ec541ba7c 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
57 | CONFIG_NET_IPGRE=m | 58 | CONFIG_NET_IPGRE=m |
58 | CONFIG_NET_IPVTI=m | 59 | CONFIG_NET_IPVTI=m |
59 | CONFIG_NET_FOU_IP_TUNNELS=y | 60 | CONFIG_NET_FOU_IP_TUNNELS=y |
60 | CONFIG_GENEVE_CORE=m | ||
61 | CONFIG_INET_AH=m | 61 | CONFIG_INET_AH=m |
62 | CONFIG_INET_ESP=m | 62 | CONFIG_INET_ESP=m |
63 | CONFIG_INET_IPCOMP=m | 63 | CONFIG_INET_IPCOMP=m |
@@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
67 | # CONFIG_INET_LRO is not set | 67 | # CONFIG_INET_LRO is not set |
68 | CONFIG_INET_DIAG=m | 68 | CONFIG_INET_DIAG=m |
69 | CONFIG_INET_UDP_DIAG=m | 69 | CONFIG_INET_UDP_DIAG=m |
70 | CONFIG_IPV6=m | ||
70 | CONFIG_IPV6_ROUTER_PREF=y | 71 | CONFIG_IPV6_ROUTER_PREF=y |
71 | CONFIG_INET6_AH=m | 72 | CONFIG_INET6_AH=m |
72 | CONFIG_INET6_ESP=m | 73 | CONFIG_INET6_ESP=m |
73 | CONFIG_INET6_IPCOMP=m | 74 | CONFIG_INET6_IPCOMP=m |
75 | CONFIG_IPV6_ILA=m | ||
74 | CONFIG_IPV6_VTI=m | 76 | CONFIG_IPV6_VTI=m |
75 | CONFIG_IPV6_GRE=m | 77 | CONFIG_IPV6_GRE=m |
76 | CONFIG_NETFILTER=y | 78 | CONFIG_NETFILTER=y |
@@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
179 | CONFIG_IP_SET_LIST_SET=m | 181 | CONFIG_IP_SET_LIST_SET=m |
180 | CONFIG_NF_CONNTRACK_IPV4=m | 182 | CONFIG_NF_CONNTRACK_IPV4=m |
181 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 183 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
184 | CONFIG_NFT_DUP_IPV4=m | ||
182 | CONFIG_NF_TABLES_ARP=m | 185 | CONFIG_NF_TABLES_ARP=m |
183 | CONFIG_NF_LOG_ARP=m | 186 | CONFIG_NF_LOG_ARP=m |
184 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 187 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
206 | CONFIG_IP_NF_ARP_MANGLE=m | 209 | CONFIG_IP_NF_ARP_MANGLE=m |
207 | CONFIG_NF_CONNTRACK_IPV6=m | 210 | CONFIG_NF_CONNTRACK_IPV6=m |
208 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 211 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
212 | CONFIG_NFT_DUP_IPV6=m | ||
209 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 213 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
210 | CONFIG_NFT_MASQ_IPV6=m | 214 | CONFIG_NFT_MASQ_IPV6=m |
211 | CONFIG_NFT_REDIR_IPV6=m | 215 | CONFIG_NFT_REDIR_IPV6=m |
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m | |||
271 | CONFIG_MPLS=y | 275 | CONFIG_MPLS=y |
272 | CONFIG_NET_MPLS_GSO=m | 276 | CONFIG_NET_MPLS_GSO=m |
273 | CONFIG_MPLS_ROUTING=m | 277 | CONFIG_MPLS_ROUTING=m |
278 | CONFIG_MPLS_IPTUNNEL=m | ||
274 | # CONFIG_WIRELESS is not set | 279 | # CONFIG_WIRELESS is not set |
275 | # CONFIG_UEVENT_HELPER is not set | 280 | # CONFIG_UEVENT_HELPER is not set |
276 | CONFIG_DEVTMPFS=y | 281 | CONFIG_DEVTMPFS=y |
@@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y | |||
370 | # CONFIG_NET_VENDOR_SEEQ is not set | 375 | # CONFIG_NET_VENDOR_SEEQ is not set |
371 | # CONFIG_NET_VENDOR_SMSC is not set | 376 | # CONFIG_NET_VENDOR_SMSC is not set |
372 | # CONFIG_NET_VENDOR_STMICRO is not set | 377 | # CONFIG_NET_VENDOR_STMICRO is not set |
378 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
373 | # CONFIG_NET_VENDOR_VIA is not set | 379 | # CONFIG_NET_VENDOR_VIA is not set |
374 | # CONFIG_NET_VENDOR_WIZNET is not set | 380 | # CONFIG_NET_VENDOR_WIZNET is not set |
375 | CONFIG_PPP=m | 381 | CONFIG_PPP=m |
@@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m | |||
537 | CONFIG_TEST_BPF=m | 543 | CONFIG_TEST_BPF=m |
538 | CONFIG_TEST_FIRMWARE=m | 544 | CONFIG_TEST_FIRMWARE=m |
539 | CONFIG_TEST_UDELAY=m | 545 | CONFIG_TEST_UDELAY=m |
546 | CONFIG_TEST_STATIC_KEYS=m | ||
540 | CONFIG_EARLY_PRINTK=y | 547 | CONFIG_EARLY_PRINTK=y |
541 | CONFIG_ENCRYPTED_KEYS=m | 548 | CONFIG_ENCRYPTED_KEYS=m |
542 | CONFIG_CRYPTO_RSA=m | 549 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index eeb3a8991fc4..6e5198e2c124 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
55 | CONFIG_NET_IPGRE=m | 56 | CONFIG_NET_IPGRE=m |
56 | CONFIG_NET_IPVTI=m | 57 | CONFIG_NET_IPVTI=m |
57 | CONFIG_NET_FOU_IP_TUNNELS=y | 58 | CONFIG_NET_FOU_IP_TUNNELS=y |
58 | CONFIG_GENEVE_CORE=m | ||
59 | CONFIG_INET_AH=m | 59 | CONFIG_INET_AH=m |
60 | CONFIG_INET_ESP=m | 60 | CONFIG_INET_ESP=m |
61 | CONFIG_INET_IPCOMP=m | 61 | CONFIG_INET_IPCOMP=m |
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
65 | # CONFIG_INET_LRO is not set | 65 | # CONFIG_INET_LRO is not set |
66 | CONFIG_INET_DIAG=m | 66 | CONFIG_INET_DIAG=m |
67 | CONFIG_INET_UDP_DIAG=m | 67 | CONFIG_INET_UDP_DIAG=m |
68 | CONFIG_IPV6=m | ||
68 | CONFIG_IPV6_ROUTER_PREF=y | 69 | CONFIG_IPV6_ROUTER_PREF=y |
69 | CONFIG_INET6_AH=m | 70 | CONFIG_INET6_AH=m |
70 | CONFIG_INET6_ESP=m | 71 | CONFIG_INET6_ESP=m |
71 | CONFIG_INET6_IPCOMP=m | 72 | CONFIG_INET6_IPCOMP=m |
73 | CONFIG_IPV6_ILA=m | ||
72 | CONFIG_IPV6_VTI=m | 74 | CONFIG_IPV6_VTI=m |
73 | CONFIG_IPV6_GRE=m | 75 | CONFIG_IPV6_GRE=m |
74 | CONFIG_NETFILTER=y | 76 | CONFIG_NETFILTER=y |
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
177 | CONFIG_IP_SET_LIST_SET=m | 179 | CONFIG_IP_SET_LIST_SET=m |
178 | CONFIG_NF_CONNTRACK_IPV4=m | 180 | CONFIG_NF_CONNTRACK_IPV4=m |
179 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 181 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
182 | CONFIG_NFT_DUP_IPV4=m | ||
180 | CONFIG_NF_TABLES_ARP=m | 183 | CONFIG_NF_TABLES_ARP=m |
181 | CONFIG_NF_LOG_ARP=m | 184 | CONFIG_NF_LOG_ARP=m |
182 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 185 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
204 | CONFIG_IP_NF_ARP_MANGLE=m | 207 | CONFIG_IP_NF_ARP_MANGLE=m |
205 | CONFIG_NF_CONNTRACK_IPV6=m | 208 | CONFIG_NF_CONNTRACK_IPV6=m |
206 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 209 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
210 | CONFIG_NFT_DUP_IPV6=m | ||
207 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 211 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
208 | CONFIG_NFT_MASQ_IPV6=m | 212 | CONFIG_NFT_MASQ_IPV6=m |
209 | CONFIG_NFT_REDIR_IPV6=m | 213 | CONFIG_NFT_REDIR_IPV6=m |
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m | |||
269 | CONFIG_MPLS=y | 273 | CONFIG_MPLS=y |
270 | CONFIG_NET_MPLS_GSO=m | 274 | CONFIG_NET_MPLS_GSO=m |
271 | CONFIG_MPLS_ROUTING=m | 275 | CONFIG_MPLS_ROUTING=m |
276 | CONFIG_MPLS_IPTUNNEL=m | ||
272 | # CONFIG_WIRELESS is not set | 277 | # CONFIG_WIRELESS is not set |
273 | # CONFIG_UEVENT_HELPER is not set | 278 | # CONFIG_UEVENT_HELPER is not set |
274 | CONFIG_DEVTMPFS=y | 279 | CONFIG_DEVTMPFS=y |
@@ -344,6 +349,7 @@ CONFIG_VETH=m | |||
344 | # CONFIG_NET_VENDOR_SAMSUNG is not set | 349 | # CONFIG_NET_VENDOR_SAMSUNG is not set |
345 | # CONFIG_NET_VENDOR_SEEQ is not set | 350 | # CONFIG_NET_VENDOR_SEEQ is not set |
346 | # CONFIG_NET_VENDOR_STMICRO is not set | 351 | # CONFIG_NET_VENDOR_STMICRO is not set |
352 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
347 | # CONFIG_NET_VENDOR_VIA is not set | 353 | # CONFIG_NET_VENDOR_VIA is not set |
348 | # CONFIG_NET_VENDOR_WIZNET is not set | 354 | # CONFIG_NET_VENDOR_WIZNET is not set |
349 | CONFIG_PPP=m | 355 | CONFIG_PPP=m |
@@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m | |||
495 | CONFIG_TEST_BPF=m | 501 | CONFIG_TEST_BPF=m |
496 | CONFIG_TEST_FIRMWARE=m | 502 | CONFIG_TEST_FIRMWARE=m |
497 | CONFIG_TEST_UDELAY=m | 503 | CONFIG_TEST_UDELAY=m |
504 | CONFIG_TEST_STATIC_KEYS=m | ||
498 | CONFIG_EARLY_PRINTK=y | 505 | CONFIG_EARLY_PRINTK=y |
499 | CONFIG_ENCRYPTED_KEYS=m | 506 | CONFIG_ENCRYPTED_KEYS=m |
500 | CONFIG_CRYPTO_RSA=m | 507 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 3a7006654ce9..f75600b0ca23 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
55 | CONFIG_NET_IPGRE=m | 56 | CONFIG_NET_IPGRE=m |
56 | CONFIG_NET_IPVTI=m | 57 | CONFIG_NET_IPVTI=m |
57 | CONFIG_NET_FOU_IP_TUNNELS=y | 58 | CONFIG_NET_FOU_IP_TUNNELS=y |
58 | CONFIG_GENEVE_CORE=m | ||
59 | CONFIG_INET_AH=m | 59 | CONFIG_INET_AH=m |
60 | CONFIG_INET_ESP=m | 60 | CONFIG_INET_ESP=m |
61 | CONFIG_INET_IPCOMP=m | 61 | CONFIG_INET_IPCOMP=m |
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
65 | # CONFIG_INET_LRO is not set | 65 | # CONFIG_INET_LRO is not set |
66 | CONFIG_INET_DIAG=m | 66 | CONFIG_INET_DIAG=m |
67 | CONFIG_INET_UDP_DIAG=m | 67 | CONFIG_INET_UDP_DIAG=m |
68 | CONFIG_IPV6=m | ||
68 | CONFIG_IPV6_ROUTER_PREF=y | 69 | CONFIG_IPV6_ROUTER_PREF=y |
69 | CONFIG_INET6_AH=m | 70 | CONFIG_INET6_AH=m |
70 | CONFIG_INET6_ESP=m | 71 | CONFIG_INET6_ESP=m |
71 | CONFIG_INET6_IPCOMP=m | 72 | CONFIG_INET6_IPCOMP=m |
73 | CONFIG_IPV6_ILA=m | ||
72 | CONFIG_IPV6_VTI=m | 74 | CONFIG_IPV6_VTI=m |
73 | CONFIG_IPV6_GRE=m | 75 | CONFIG_IPV6_GRE=m |
74 | CONFIG_NETFILTER=y | 76 | CONFIG_NETFILTER=y |
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
177 | CONFIG_IP_SET_LIST_SET=m | 179 | CONFIG_IP_SET_LIST_SET=m |
178 | CONFIG_NF_CONNTRACK_IPV4=m | 180 | CONFIG_NF_CONNTRACK_IPV4=m |
179 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 181 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
182 | CONFIG_NFT_DUP_IPV4=m | ||
180 | CONFIG_NF_TABLES_ARP=m | 183 | CONFIG_NF_TABLES_ARP=m |
181 | CONFIG_NF_LOG_ARP=m | 184 | CONFIG_NF_LOG_ARP=m |
182 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 185 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
204 | CONFIG_IP_NF_ARP_MANGLE=m | 207 | CONFIG_IP_NF_ARP_MANGLE=m |
205 | CONFIG_NF_CONNTRACK_IPV6=m | 208 | CONFIG_NF_CONNTRACK_IPV6=m |
206 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 209 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
210 | CONFIG_NFT_DUP_IPV6=m | ||
207 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 211 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
208 | CONFIG_NFT_MASQ_IPV6=m | 212 | CONFIG_NFT_MASQ_IPV6=m |
209 | CONFIG_NFT_REDIR_IPV6=m | 213 | CONFIG_NFT_REDIR_IPV6=m |
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m | |||
269 | CONFIG_MPLS=y | 273 | CONFIG_MPLS=y |
270 | CONFIG_NET_MPLS_GSO=m | 274 | CONFIG_NET_MPLS_GSO=m |
271 | CONFIG_MPLS_ROUTING=m | 275 | CONFIG_MPLS_ROUTING=m |
276 | CONFIG_MPLS_IPTUNNEL=m | ||
272 | # CONFIG_WIRELESS is not set | 277 | # CONFIG_WIRELESS is not set |
273 | # CONFIG_UEVENT_HELPER is not set | 278 | # CONFIG_UEVENT_HELPER is not set |
274 | CONFIG_DEVTMPFS=y | 279 | CONFIG_DEVTMPFS=y |
@@ -355,6 +360,7 @@ CONFIG_NE2000=y | |||
355 | # CONFIG_NET_VENDOR_SEEQ is not set | 360 | # CONFIG_NET_VENDOR_SEEQ is not set |
356 | CONFIG_SMC91X=y | 361 | CONFIG_SMC91X=y |
357 | # CONFIG_NET_VENDOR_STMICRO is not set | 362 | # CONFIG_NET_VENDOR_STMICRO is not set |
363 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
358 | # CONFIG_NET_VENDOR_VIA is not set | 364 | # CONFIG_NET_VENDOR_VIA is not set |
359 | # CONFIG_NET_VENDOR_WIZNET is not set | 365 | # CONFIG_NET_VENDOR_WIZNET is not set |
360 | CONFIG_PPP=m | 366 | CONFIG_PPP=m |
@@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m | |||
517 | CONFIG_TEST_BPF=m | 523 | CONFIG_TEST_BPF=m |
518 | CONFIG_TEST_FIRMWARE=m | 524 | CONFIG_TEST_FIRMWARE=m |
519 | CONFIG_TEST_UDELAY=m | 525 | CONFIG_TEST_UDELAY=m |
526 | CONFIG_TEST_STATIC_KEYS=m | ||
520 | CONFIG_EARLY_PRINTK=y | 527 | CONFIG_EARLY_PRINTK=y |
521 | CONFIG_ENCRYPTED_KEYS=m | 528 | CONFIG_ENCRYPTED_KEYS=m |
522 | CONFIG_CRYPTO_RSA=m | 529 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 0586b323a673..a42d91c389a6 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
53 | CONFIG_NET_IPGRE=m | 54 | CONFIG_NET_IPGRE=m |
54 | CONFIG_NET_IPVTI=m | 55 | CONFIG_NET_IPVTI=m |
55 | CONFIG_NET_FOU_IP_TUNNELS=y | 56 | CONFIG_NET_FOU_IP_TUNNELS=y |
56 | CONFIG_GENEVE_CORE=m | ||
57 | CONFIG_INET_AH=m | 57 | CONFIG_INET_AH=m |
58 | CONFIG_INET_ESP=m | 58 | CONFIG_INET_ESP=m |
59 | CONFIG_INET_IPCOMP=m | 59 | CONFIG_INET_IPCOMP=m |
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
63 | # CONFIG_INET_LRO is not set | 63 | # CONFIG_INET_LRO is not set |
64 | CONFIG_INET_DIAG=m | 64 | CONFIG_INET_DIAG=m |
65 | CONFIG_INET_UDP_DIAG=m | 65 | CONFIG_INET_UDP_DIAG=m |
66 | CONFIG_IPV6=m | ||
66 | CONFIG_IPV6_ROUTER_PREF=y | 67 | CONFIG_IPV6_ROUTER_PREF=y |
67 | CONFIG_INET6_AH=m | 68 | CONFIG_INET6_AH=m |
68 | CONFIG_INET6_ESP=m | 69 | CONFIG_INET6_ESP=m |
69 | CONFIG_INET6_IPCOMP=m | 70 | CONFIG_INET6_IPCOMP=m |
71 | CONFIG_IPV6_ILA=m | ||
70 | CONFIG_IPV6_VTI=m | 72 | CONFIG_IPV6_VTI=m |
71 | CONFIG_IPV6_GRE=m | 73 | CONFIG_IPV6_GRE=m |
72 | CONFIG_NETFILTER=y | 74 | CONFIG_NETFILTER=y |
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
175 | CONFIG_IP_SET_LIST_SET=m | 177 | CONFIG_IP_SET_LIST_SET=m |
176 | CONFIG_NF_CONNTRACK_IPV4=m | 178 | CONFIG_NF_CONNTRACK_IPV4=m |
177 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 179 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
180 | CONFIG_NFT_DUP_IPV4=m | ||
178 | CONFIG_NF_TABLES_ARP=m | 181 | CONFIG_NF_TABLES_ARP=m |
179 | CONFIG_NF_LOG_ARP=m | 182 | CONFIG_NF_LOG_ARP=m |
180 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 183 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
202 | CONFIG_IP_NF_ARP_MANGLE=m | 205 | CONFIG_IP_NF_ARP_MANGLE=m |
203 | CONFIG_NF_CONNTRACK_IPV6=m | 206 | CONFIG_NF_CONNTRACK_IPV6=m |
204 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 207 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
208 | CONFIG_NFT_DUP_IPV6=m | ||
205 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 209 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
206 | CONFIG_NFT_MASQ_IPV6=m | 210 | CONFIG_NFT_MASQ_IPV6=m |
207 | CONFIG_NFT_REDIR_IPV6=m | 211 | CONFIG_NFT_REDIR_IPV6=m |
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m | |||
267 | CONFIG_MPLS=y | 271 | CONFIG_MPLS=y |
268 | CONFIG_NET_MPLS_GSO=m | 272 | CONFIG_NET_MPLS_GSO=m |
269 | CONFIG_MPLS_ROUTING=m | 273 | CONFIG_MPLS_ROUTING=m |
274 | CONFIG_MPLS_IPTUNNEL=m | ||
270 | # CONFIG_WIRELESS is not set | 275 | # CONFIG_WIRELESS is not set |
271 | # CONFIG_UEVENT_HELPER is not set | 276 | # CONFIG_UEVENT_HELPER is not set |
272 | CONFIG_DEVTMPFS=y | 277 | CONFIG_DEVTMPFS=y |
@@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y | |||
343 | # CONFIG_NET_VENDOR_SAMSUNG is not set | 348 | # CONFIG_NET_VENDOR_SAMSUNG is not set |
344 | # CONFIG_NET_VENDOR_SEEQ is not set | 349 | # CONFIG_NET_VENDOR_SEEQ is not set |
345 | # CONFIG_NET_VENDOR_STMICRO is not set | 350 | # CONFIG_NET_VENDOR_STMICRO is not set |
351 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
346 | # CONFIG_NET_VENDOR_VIA is not set | 352 | # CONFIG_NET_VENDOR_VIA is not set |
347 | # CONFIG_NET_VENDOR_WIZNET is not set | 353 | # CONFIG_NET_VENDOR_WIZNET is not set |
348 | CONFIG_PPP=m | 354 | CONFIG_PPP=m |
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m | |||
488 | CONFIG_TEST_BPF=m | 494 | CONFIG_TEST_BPF=m |
489 | CONFIG_TEST_FIRMWARE=m | 495 | CONFIG_TEST_FIRMWARE=m |
490 | CONFIG_TEST_UDELAY=m | 496 | CONFIG_TEST_UDELAY=m |
497 | CONFIG_TEST_STATIC_KEYS=m | ||
491 | CONFIG_EARLY_PRINTK=y | 498 | CONFIG_EARLY_PRINTK=y |
492 | CONFIG_ENCRYPTED_KEYS=m | 499 | CONFIG_ENCRYPTED_KEYS=m |
493 | CONFIG_CRYPTO_RSA=m | 500 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index ad1dbce07aa4..77f4a11083e9 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
55 | CONFIG_NET_IPGRE=m | 56 | CONFIG_NET_IPGRE=m |
56 | CONFIG_NET_IPVTI=m | 57 | CONFIG_NET_IPVTI=m |
57 | CONFIG_NET_FOU_IP_TUNNELS=y | 58 | CONFIG_NET_FOU_IP_TUNNELS=y |
58 | CONFIG_GENEVE_CORE=m | ||
59 | CONFIG_INET_AH=m | 59 | CONFIG_INET_AH=m |
60 | CONFIG_INET_ESP=m | 60 | CONFIG_INET_ESP=m |
61 | CONFIG_INET_IPCOMP=m | 61 | CONFIG_INET_IPCOMP=m |
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
65 | # CONFIG_INET_LRO is not set | 65 | # CONFIG_INET_LRO is not set |
66 | CONFIG_INET_DIAG=m | 66 | CONFIG_INET_DIAG=m |
67 | CONFIG_INET_UDP_DIAG=m | 67 | CONFIG_INET_UDP_DIAG=m |
68 | CONFIG_IPV6=m | ||
68 | CONFIG_IPV6_ROUTER_PREF=y | 69 | CONFIG_IPV6_ROUTER_PREF=y |
69 | CONFIG_INET6_AH=m | 70 | CONFIG_INET6_AH=m |
70 | CONFIG_INET6_ESP=m | 71 | CONFIG_INET6_ESP=m |
71 | CONFIG_INET6_IPCOMP=m | 72 | CONFIG_INET6_IPCOMP=m |
73 | CONFIG_IPV6_ILA=m | ||
72 | CONFIG_IPV6_VTI=m | 74 | CONFIG_IPV6_VTI=m |
73 | CONFIG_IPV6_GRE=m | 75 | CONFIG_IPV6_GRE=m |
74 | CONFIG_NETFILTER=y | 76 | CONFIG_NETFILTER=y |
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
177 | CONFIG_IP_SET_LIST_SET=m | 179 | CONFIG_IP_SET_LIST_SET=m |
178 | CONFIG_NF_CONNTRACK_IPV4=m | 180 | CONFIG_NF_CONNTRACK_IPV4=m |
179 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 181 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
182 | CONFIG_NFT_DUP_IPV4=m | ||
180 | CONFIG_NF_TABLES_ARP=m | 183 | CONFIG_NF_TABLES_ARP=m |
181 | CONFIG_NF_LOG_ARP=m | 184 | CONFIG_NF_LOG_ARP=m |
182 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 185 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
204 | CONFIG_IP_NF_ARP_MANGLE=m | 207 | CONFIG_IP_NF_ARP_MANGLE=m |
205 | CONFIG_NF_CONNTRACK_IPV6=m | 208 | CONFIG_NF_CONNTRACK_IPV6=m |
206 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 209 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
210 | CONFIG_NFT_DUP_IPV6=m | ||
207 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 211 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
208 | CONFIG_NFT_MASQ_IPV6=m | 212 | CONFIG_NFT_MASQ_IPV6=m |
209 | CONFIG_NFT_REDIR_IPV6=m | 213 | CONFIG_NFT_REDIR_IPV6=m |
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m | |||
269 | CONFIG_MPLS=y | 273 | CONFIG_MPLS=y |
270 | CONFIG_NET_MPLS_GSO=m | 274 | CONFIG_NET_MPLS_GSO=m |
271 | CONFIG_MPLS_ROUTING=m | 275 | CONFIG_MPLS_ROUTING=m |
276 | CONFIG_MPLS_IPTUNNEL=m | ||
272 | # CONFIG_WIRELESS is not set | 277 | # CONFIG_WIRELESS is not set |
273 | # CONFIG_UEVENT_HELPER is not set | 278 | # CONFIG_UEVENT_HELPER is not set |
274 | CONFIG_DEVTMPFS=y | 279 | CONFIG_DEVTMPFS=y |
@@ -345,6 +350,7 @@ CONFIG_HPLANCE=y | |||
345 | # CONFIG_NET_VENDOR_SAMSUNG is not set | 350 | # CONFIG_NET_VENDOR_SAMSUNG is not set |
346 | # CONFIG_NET_VENDOR_SEEQ is not set | 351 | # CONFIG_NET_VENDOR_SEEQ is not set |
347 | # CONFIG_NET_VENDOR_STMICRO is not set | 352 | # CONFIG_NET_VENDOR_STMICRO is not set |
353 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
348 | # CONFIG_NET_VENDOR_VIA is not set | 354 | # CONFIG_NET_VENDOR_VIA is not set |
349 | # CONFIG_NET_VENDOR_WIZNET is not set | 355 | # CONFIG_NET_VENDOR_WIZNET is not set |
350 | CONFIG_PPP=m | 356 | CONFIG_PPP=m |
@@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m | |||
497 | CONFIG_TEST_BPF=m | 503 | CONFIG_TEST_BPF=m |
498 | CONFIG_TEST_FIRMWARE=m | 504 | CONFIG_TEST_FIRMWARE=m |
499 | CONFIG_TEST_UDELAY=m | 505 | CONFIG_TEST_UDELAY=m |
506 | CONFIG_TEST_STATIC_KEYS=m | ||
500 | CONFIG_EARLY_PRINTK=y | 507 | CONFIG_EARLY_PRINTK=y |
501 | CONFIG_ENCRYPTED_KEYS=m | 508 | CONFIG_ENCRYPTED_KEYS=m |
502 | CONFIG_CRYPTO_RSA=m | 509 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index b44acacaecf4..5a329f77329b 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
54 | CONFIG_NET_IPGRE=m | 55 | CONFIG_NET_IPGRE=m |
55 | CONFIG_NET_IPVTI=m | 56 | CONFIG_NET_IPVTI=m |
56 | CONFIG_NET_FOU_IP_TUNNELS=y | 57 | CONFIG_NET_FOU_IP_TUNNELS=y |
57 | CONFIG_GENEVE_CORE=m | ||
58 | CONFIG_INET_AH=m | 58 | CONFIG_INET_AH=m |
59 | CONFIG_INET_ESP=m | 59 | CONFIG_INET_ESP=m |
60 | CONFIG_INET_IPCOMP=m | 60 | CONFIG_INET_IPCOMP=m |
@@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
64 | # CONFIG_INET_LRO is not set | 64 | # CONFIG_INET_LRO is not set |
65 | CONFIG_INET_DIAG=m | 65 | CONFIG_INET_DIAG=m |
66 | CONFIG_INET_UDP_DIAG=m | 66 | CONFIG_INET_UDP_DIAG=m |
67 | CONFIG_IPV6=m | ||
67 | CONFIG_IPV6_ROUTER_PREF=y | 68 | CONFIG_IPV6_ROUTER_PREF=y |
68 | CONFIG_INET6_AH=m | 69 | CONFIG_INET6_AH=m |
69 | CONFIG_INET6_ESP=m | 70 | CONFIG_INET6_ESP=m |
70 | CONFIG_INET6_IPCOMP=m | 71 | CONFIG_INET6_IPCOMP=m |
72 | CONFIG_IPV6_ILA=m | ||
71 | CONFIG_IPV6_VTI=m | 73 | CONFIG_IPV6_VTI=m |
72 | CONFIG_IPV6_GRE=m | 74 | CONFIG_IPV6_GRE=m |
73 | CONFIG_NETFILTER=y | 75 | CONFIG_NETFILTER=y |
@@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
176 | CONFIG_IP_SET_LIST_SET=m | 178 | CONFIG_IP_SET_LIST_SET=m |
177 | CONFIG_NF_CONNTRACK_IPV4=m | 179 | CONFIG_NF_CONNTRACK_IPV4=m |
178 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 180 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
181 | CONFIG_NFT_DUP_IPV4=m | ||
179 | CONFIG_NF_TABLES_ARP=m | 182 | CONFIG_NF_TABLES_ARP=m |
180 | CONFIG_NF_LOG_ARP=m | 183 | CONFIG_NF_LOG_ARP=m |
181 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 184 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
203 | CONFIG_IP_NF_ARP_MANGLE=m | 206 | CONFIG_IP_NF_ARP_MANGLE=m |
204 | CONFIG_NF_CONNTRACK_IPV6=m | 207 | CONFIG_NF_CONNTRACK_IPV6=m |
205 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 208 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
209 | CONFIG_NFT_DUP_IPV6=m | ||
206 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 210 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
207 | CONFIG_NFT_MASQ_IPV6=m | 211 | CONFIG_NFT_MASQ_IPV6=m |
208 | CONFIG_NFT_REDIR_IPV6=m | 212 | CONFIG_NFT_REDIR_IPV6=m |
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m | |||
271 | CONFIG_MPLS=y | 275 | CONFIG_MPLS=y |
272 | CONFIG_NET_MPLS_GSO=m | 276 | CONFIG_NET_MPLS_GSO=m |
273 | CONFIG_MPLS_ROUTING=m | 277 | CONFIG_MPLS_ROUTING=m |
278 | CONFIG_MPLS_IPTUNNEL=m | ||
274 | # CONFIG_WIRELESS is not set | 279 | # CONFIG_WIRELESS is not set |
275 | # CONFIG_UEVENT_HELPER is not set | 280 | # CONFIG_UEVENT_HELPER is not set |
276 | CONFIG_DEVTMPFS=y | 281 | CONFIG_DEVTMPFS=y |
@@ -364,6 +369,7 @@ CONFIG_MAC8390=y | |||
364 | # CONFIG_NET_VENDOR_SEEQ is not set | 369 | # CONFIG_NET_VENDOR_SEEQ is not set |
365 | # CONFIG_NET_VENDOR_SMSC is not set | 370 | # CONFIG_NET_VENDOR_SMSC is not set |
366 | # CONFIG_NET_VENDOR_STMICRO is not set | 371 | # CONFIG_NET_VENDOR_STMICRO is not set |
372 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
367 | # CONFIG_NET_VENDOR_VIA is not set | 373 | # CONFIG_NET_VENDOR_VIA is not set |
368 | # CONFIG_NET_VENDOR_WIZNET is not set | 374 | # CONFIG_NET_VENDOR_WIZNET is not set |
369 | CONFIG_PPP=m | 375 | CONFIG_PPP=m |
@@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m | |||
519 | CONFIG_TEST_BPF=m | 525 | CONFIG_TEST_BPF=m |
520 | CONFIG_TEST_FIRMWARE=m | 526 | CONFIG_TEST_FIRMWARE=m |
521 | CONFIG_TEST_UDELAY=m | 527 | CONFIG_TEST_UDELAY=m |
528 | CONFIG_TEST_STATIC_KEYS=m | ||
522 | CONFIG_EARLY_PRINTK=y | 529 | CONFIG_EARLY_PRINTK=y |
523 | CONFIG_ENCRYPTED_KEYS=m | 530 | CONFIG_ENCRYPTED_KEYS=m |
524 | CONFIG_CRYPTO_RSA=m | 531 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 8afca3753db1..83c80d2030ec 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
64 | CONFIG_NET_IPGRE=m | 65 | CONFIG_NET_IPGRE=m |
65 | CONFIG_NET_IPVTI=m | 66 | CONFIG_NET_IPVTI=m |
66 | CONFIG_NET_FOU_IP_TUNNELS=y | 67 | CONFIG_NET_FOU_IP_TUNNELS=y |
67 | CONFIG_GENEVE_CORE=m | ||
68 | CONFIG_INET_AH=m | 68 | CONFIG_INET_AH=m |
69 | CONFIG_INET_ESP=m | 69 | CONFIG_INET_ESP=m |
70 | CONFIG_INET_IPCOMP=m | 70 | CONFIG_INET_IPCOMP=m |
@@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
74 | # CONFIG_INET_LRO is not set | 74 | # CONFIG_INET_LRO is not set |
75 | CONFIG_INET_DIAG=m | 75 | CONFIG_INET_DIAG=m |
76 | CONFIG_INET_UDP_DIAG=m | 76 | CONFIG_INET_UDP_DIAG=m |
77 | CONFIG_IPV6=m | ||
77 | CONFIG_IPV6_ROUTER_PREF=y | 78 | CONFIG_IPV6_ROUTER_PREF=y |
78 | CONFIG_INET6_AH=m | 79 | CONFIG_INET6_AH=m |
79 | CONFIG_INET6_ESP=m | 80 | CONFIG_INET6_ESP=m |
80 | CONFIG_INET6_IPCOMP=m | 81 | CONFIG_INET6_IPCOMP=m |
82 | CONFIG_IPV6_ILA=m | ||
81 | CONFIG_IPV6_VTI=m | 83 | CONFIG_IPV6_VTI=m |
82 | CONFIG_IPV6_GRE=m | 84 | CONFIG_IPV6_GRE=m |
83 | CONFIG_NETFILTER=y | 85 | CONFIG_NETFILTER=y |
@@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
186 | CONFIG_IP_SET_LIST_SET=m | 188 | CONFIG_IP_SET_LIST_SET=m |
187 | CONFIG_NF_CONNTRACK_IPV4=m | 189 | CONFIG_NF_CONNTRACK_IPV4=m |
188 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 190 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
191 | CONFIG_NFT_DUP_IPV4=m | ||
189 | CONFIG_NF_TABLES_ARP=m | 192 | CONFIG_NF_TABLES_ARP=m |
190 | CONFIG_NF_LOG_ARP=m | 193 | CONFIG_NF_LOG_ARP=m |
191 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 194 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
213 | CONFIG_IP_NF_ARP_MANGLE=m | 216 | CONFIG_IP_NF_ARP_MANGLE=m |
214 | CONFIG_NF_CONNTRACK_IPV6=m | 217 | CONFIG_NF_CONNTRACK_IPV6=m |
215 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 218 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
219 | CONFIG_NFT_DUP_IPV6=m | ||
216 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 220 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
217 | CONFIG_NFT_MASQ_IPV6=m | 221 | CONFIG_NFT_MASQ_IPV6=m |
218 | CONFIG_NFT_REDIR_IPV6=m | 222 | CONFIG_NFT_REDIR_IPV6=m |
@@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m | |||
281 | CONFIG_MPLS=y | 285 | CONFIG_MPLS=y |
282 | CONFIG_NET_MPLS_GSO=m | 286 | CONFIG_NET_MPLS_GSO=m |
283 | CONFIG_MPLS_ROUTING=m | 287 | CONFIG_MPLS_ROUTING=m |
288 | CONFIG_MPLS_IPTUNNEL=m | ||
284 | # CONFIG_WIRELESS is not set | 289 | # CONFIG_WIRELESS is not set |
285 | # CONFIG_UEVENT_HELPER is not set | 290 | # CONFIG_UEVENT_HELPER is not set |
286 | CONFIG_DEVTMPFS=y | 291 | CONFIG_DEVTMPFS=y |
@@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y | |||
410 | # CONFIG_NET_VENDOR_SEEQ is not set | 415 | # CONFIG_NET_VENDOR_SEEQ is not set |
411 | CONFIG_SMC91X=y | 416 | CONFIG_SMC91X=y |
412 | # CONFIG_NET_VENDOR_STMICRO is not set | 417 | # CONFIG_NET_VENDOR_STMICRO is not set |
418 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
413 | # CONFIG_NET_VENDOR_VIA is not set | 419 | # CONFIG_NET_VENDOR_VIA is not set |
414 | # CONFIG_NET_VENDOR_WIZNET is not set | 420 | # CONFIG_NET_VENDOR_WIZNET is not set |
415 | CONFIG_PLIP=m | 421 | CONFIG_PLIP=m |
@@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m | |||
599 | CONFIG_TEST_BPF=m | 605 | CONFIG_TEST_BPF=m |
600 | CONFIG_TEST_FIRMWARE=m | 606 | CONFIG_TEST_FIRMWARE=m |
601 | CONFIG_TEST_UDELAY=m | 607 | CONFIG_TEST_UDELAY=m |
608 | CONFIG_TEST_STATIC_KEYS=m | ||
602 | CONFIG_EARLY_PRINTK=y | 609 | CONFIG_EARLY_PRINTK=y |
603 | CONFIG_ENCRYPTED_KEYS=m | 610 | CONFIG_ENCRYPTED_KEYS=m |
604 | CONFIG_CRYPTO_RSA=m | 611 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index ef00875994d9..6cb42c3bf5a2 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
52 | CONFIG_NET_IPGRE=m | 53 | CONFIG_NET_IPGRE=m |
53 | CONFIG_NET_IPVTI=m | 54 | CONFIG_NET_IPVTI=m |
54 | CONFIG_NET_FOU_IP_TUNNELS=y | 55 | CONFIG_NET_FOU_IP_TUNNELS=y |
55 | CONFIG_GENEVE_CORE=m | ||
56 | CONFIG_INET_AH=m | 56 | CONFIG_INET_AH=m |
57 | CONFIG_INET_ESP=m | 57 | CONFIG_INET_ESP=m |
58 | CONFIG_INET_IPCOMP=m | 58 | CONFIG_INET_IPCOMP=m |
@@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
62 | # CONFIG_INET_LRO is not set | 62 | # CONFIG_INET_LRO is not set |
63 | CONFIG_INET_DIAG=m | 63 | CONFIG_INET_DIAG=m |
64 | CONFIG_INET_UDP_DIAG=m | 64 | CONFIG_INET_UDP_DIAG=m |
65 | CONFIG_IPV6=m | ||
65 | CONFIG_IPV6_ROUTER_PREF=y | 66 | CONFIG_IPV6_ROUTER_PREF=y |
66 | CONFIG_INET6_AH=m | 67 | CONFIG_INET6_AH=m |
67 | CONFIG_INET6_ESP=m | 68 | CONFIG_INET6_ESP=m |
68 | CONFIG_INET6_IPCOMP=m | 69 | CONFIG_INET6_IPCOMP=m |
70 | CONFIG_IPV6_ILA=m | ||
69 | CONFIG_IPV6_VTI=m | 71 | CONFIG_IPV6_VTI=m |
70 | CONFIG_IPV6_GRE=m | 72 | CONFIG_IPV6_GRE=m |
71 | CONFIG_NETFILTER=y | 73 | CONFIG_NETFILTER=y |
@@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
174 | CONFIG_IP_SET_LIST_SET=m | 176 | CONFIG_IP_SET_LIST_SET=m |
175 | CONFIG_NF_CONNTRACK_IPV4=m | 177 | CONFIG_NF_CONNTRACK_IPV4=m |
176 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 178 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
179 | CONFIG_NFT_DUP_IPV4=m | ||
177 | CONFIG_NF_TABLES_ARP=m | 180 | CONFIG_NF_TABLES_ARP=m |
178 | CONFIG_NF_LOG_ARP=m | 181 | CONFIG_NF_LOG_ARP=m |
179 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 182 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
201 | CONFIG_IP_NF_ARP_MANGLE=m | 204 | CONFIG_IP_NF_ARP_MANGLE=m |
202 | CONFIG_NF_CONNTRACK_IPV6=m | 205 | CONFIG_NF_CONNTRACK_IPV6=m |
203 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 206 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
207 | CONFIG_NFT_DUP_IPV6=m | ||
204 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 208 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
205 | CONFIG_NFT_MASQ_IPV6=m | 209 | CONFIG_NFT_MASQ_IPV6=m |
206 | CONFIG_NFT_REDIR_IPV6=m | 210 | CONFIG_NFT_REDIR_IPV6=m |
@@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m | |||
266 | CONFIG_MPLS=y | 270 | CONFIG_MPLS=y |
267 | CONFIG_NET_MPLS_GSO=m | 271 | CONFIG_NET_MPLS_GSO=m |
268 | CONFIG_MPLS_ROUTING=m | 272 | CONFIG_MPLS_ROUTING=m |
273 | CONFIG_MPLS_IPTUNNEL=m | ||
269 | # CONFIG_WIRELESS is not set | 274 | # CONFIG_WIRELESS is not set |
270 | # CONFIG_UEVENT_HELPER is not set | 275 | # CONFIG_UEVENT_HELPER is not set |
271 | CONFIG_DEVTMPFS=y | 276 | CONFIG_DEVTMPFS=y |
@@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y | |||
343 | # CONFIG_NET_VENDOR_SAMSUNG is not set | 348 | # CONFIG_NET_VENDOR_SAMSUNG is not set |
344 | # CONFIG_NET_VENDOR_SEEQ is not set | 349 | # CONFIG_NET_VENDOR_SEEQ is not set |
345 | # CONFIG_NET_VENDOR_STMICRO is not set | 350 | # CONFIG_NET_VENDOR_STMICRO is not set |
351 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
346 | # CONFIG_NET_VENDOR_VIA is not set | 352 | # CONFIG_NET_VENDOR_VIA is not set |
347 | # CONFIG_NET_VENDOR_WIZNET is not set | 353 | # CONFIG_NET_VENDOR_WIZNET is not set |
348 | CONFIG_PPP=m | 354 | CONFIG_PPP=m |
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m | |||
488 | CONFIG_TEST_BPF=m | 494 | CONFIG_TEST_BPF=m |
489 | CONFIG_TEST_FIRMWARE=m | 495 | CONFIG_TEST_FIRMWARE=m |
490 | CONFIG_TEST_UDELAY=m | 496 | CONFIG_TEST_UDELAY=m |
497 | CONFIG_TEST_STATIC_KEYS=m | ||
491 | CONFIG_EARLY_PRINTK=y | 498 | CONFIG_EARLY_PRINTK=y |
492 | CONFIG_ENCRYPTED_KEYS=m | 499 | CONFIG_ENCRYPTED_KEYS=m |
493 | CONFIG_CRYPTO_RSA=m | 500 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 387c2bd90ff1..c7508c30330c 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
53 | CONFIG_NET_IPGRE=m | 54 | CONFIG_NET_IPGRE=m |
54 | CONFIG_NET_IPVTI=m | 55 | CONFIG_NET_IPVTI=m |
55 | CONFIG_NET_FOU_IP_TUNNELS=y | 56 | CONFIG_NET_FOU_IP_TUNNELS=y |
56 | CONFIG_GENEVE_CORE=m | ||
57 | CONFIG_INET_AH=m | 57 | CONFIG_INET_AH=m |
58 | CONFIG_INET_ESP=m | 58 | CONFIG_INET_ESP=m |
59 | CONFIG_INET_IPCOMP=m | 59 | CONFIG_INET_IPCOMP=m |
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
63 | # CONFIG_INET_LRO is not set | 63 | # CONFIG_INET_LRO is not set |
64 | CONFIG_INET_DIAG=m | 64 | CONFIG_INET_DIAG=m |
65 | CONFIG_INET_UDP_DIAG=m | 65 | CONFIG_INET_UDP_DIAG=m |
66 | CONFIG_IPV6=m | ||
66 | CONFIG_IPV6_ROUTER_PREF=y | 67 | CONFIG_IPV6_ROUTER_PREF=y |
67 | CONFIG_INET6_AH=m | 68 | CONFIG_INET6_AH=m |
68 | CONFIG_INET6_ESP=m | 69 | CONFIG_INET6_ESP=m |
69 | CONFIG_INET6_IPCOMP=m | 70 | CONFIG_INET6_IPCOMP=m |
71 | CONFIG_IPV6_ILA=m | ||
70 | CONFIG_IPV6_VTI=m | 72 | CONFIG_IPV6_VTI=m |
71 | CONFIG_IPV6_GRE=m | 73 | CONFIG_IPV6_GRE=m |
72 | CONFIG_NETFILTER=y | 74 | CONFIG_NETFILTER=y |
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
175 | CONFIG_IP_SET_LIST_SET=m | 177 | CONFIG_IP_SET_LIST_SET=m |
176 | CONFIG_NF_CONNTRACK_IPV4=m | 178 | CONFIG_NF_CONNTRACK_IPV4=m |
177 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 179 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
180 | CONFIG_NFT_DUP_IPV4=m | ||
178 | CONFIG_NF_TABLES_ARP=m | 181 | CONFIG_NF_TABLES_ARP=m |
179 | CONFIG_NF_LOG_ARP=m | 182 | CONFIG_NF_LOG_ARP=m |
180 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 183 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
202 | CONFIG_IP_NF_ARP_MANGLE=m | 205 | CONFIG_IP_NF_ARP_MANGLE=m |
203 | CONFIG_NF_CONNTRACK_IPV6=m | 206 | CONFIG_NF_CONNTRACK_IPV6=m |
204 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 207 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
208 | CONFIG_NFT_DUP_IPV6=m | ||
205 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 209 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
206 | CONFIG_NFT_MASQ_IPV6=m | 210 | CONFIG_NFT_MASQ_IPV6=m |
207 | CONFIG_NFT_REDIR_IPV6=m | 211 | CONFIG_NFT_REDIR_IPV6=m |
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m | |||
267 | CONFIG_MPLS=y | 271 | CONFIG_MPLS=y |
268 | CONFIG_NET_MPLS_GSO=m | 272 | CONFIG_NET_MPLS_GSO=m |
269 | CONFIG_MPLS_ROUTING=m | 273 | CONFIG_MPLS_ROUTING=m |
274 | CONFIG_MPLS_IPTUNNEL=m | ||
270 | # CONFIG_WIRELESS is not set | 275 | # CONFIG_WIRELESS is not set |
271 | # CONFIG_UEVENT_HELPER is not set | 276 | # CONFIG_UEVENT_HELPER is not set |
272 | CONFIG_DEVTMPFS=y | 277 | CONFIG_DEVTMPFS=y |
@@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y | |||
343 | # CONFIG_NET_VENDOR_SAMSUNG is not set | 348 | # CONFIG_NET_VENDOR_SAMSUNG is not set |
344 | # CONFIG_NET_VENDOR_SEEQ is not set | 349 | # CONFIG_NET_VENDOR_SEEQ is not set |
345 | # CONFIG_NET_VENDOR_STMICRO is not set | 350 | # CONFIG_NET_VENDOR_STMICRO is not set |
351 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
346 | # CONFIG_NET_VENDOR_VIA is not set | 352 | # CONFIG_NET_VENDOR_VIA is not set |
347 | # CONFIG_NET_VENDOR_WIZNET is not set | 353 | # CONFIG_NET_VENDOR_WIZNET is not set |
348 | CONFIG_PPP=m | 354 | CONFIG_PPP=m |
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m | |||
488 | CONFIG_TEST_BPF=m | 494 | CONFIG_TEST_BPF=m |
489 | CONFIG_TEST_FIRMWARE=m | 495 | CONFIG_TEST_FIRMWARE=m |
490 | CONFIG_TEST_UDELAY=m | 496 | CONFIG_TEST_UDELAY=m |
497 | CONFIG_TEST_STATIC_KEYS=m | ||
491 | CONFIG_EARLY_PRINTK=y | 498 | CONFIG_EARLY_PRINTK=y |
492 | CONFIG_ENCRYPTED_KEYS=m | 499 | CONFIG_ENCRYPTED_KEYS=m |
493 | CONFIG_CRYPTO_RSA=m | 500 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 35355c1bc714..64b71664a303 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
53 | CONFIG_NET_IPGRE=m | 54 | CONFIG_NET_IPGRE=m |
54 | CONFIG_NET_IPVTI=m | 55 | CONFIG_NET_IPVTI=m |
55 | CONFIG_NET_FOU_IP_TUNNELS=y | 56 | CONFIG_NET_FOU_IP_TUNNELS=y |
56 | CONFIG_GENEVE_CORE=m | ||
57 | CONFIG_INET_AH=m | 57 | CONFIG_INET_AH=m |
58 | CONFIG_INET_ESP=m | 58 | CONFIG_INET_ESP=m |
59 | CONFIG_INET_IPCOMP=m | 59 | CONFIG_INET_IPCOMP=m |
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
63 | # CONFIG_INET_LRO is not set | 63 | # CONFIG_INET_LRO is not set |
64 | CONFIG_INET_DIAG=m | 64 | CONFIG_INET_DIAG=m |
65 | CONFIG_INET_UDP_DIAG=m | 65 | CONFIG_INET_UDP_DIAG=m |
66 | CONFIG_IPV6=m | ||
66 | CONFIG_IPV6_ROUTER_PREF=y | 67 | CONFIG_IPV6_ROUTER_PREF=y |
67 | CONFIG_INET6_AH=m | 68 | CONFIG_INET6_AH=m |
68 | CONFIG_INET6_ESP=m | 69 | CONFIG_INET6_ESP=m |
69 | CONFIG_INET6_IPCOMP=m | 70 | CONFIG_INET6_IPCOMP=m |
71 | CONFIG_IPV6_ILA=m | ||
70 | CONFIG_IPV6_VTI=m | 72 | CONFIG_IPV6_VTI=m |
71 | CONFIG_IPV6_GRE=m | 73 | CONFIG_IPV6_GRE=m |
72 | CONFIG_NETFILTER=y | 74 | CONFIG_NETFILTER=y |
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
175 | CONFIG_IP_SET_LIST_SET=m | 177 | CONFIG_IP_SET_LIST_SET=m |
176 | CONFIG_NF_CONNTRACK_IPV4=m | 178 | CONFIG_NF_CONNTRACK_IPV4=m |
177 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 179 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
180 | CONFIG_NFT_DUP_IPV4=m | ||
178 | CONFIG_NF_TABLES_ARP=m | 181 | CONFIG_NF_TABLES_ARP=m |
179 | CONFIG_NF_LOG_ARP=m | 182 | CONFIG_NF_LOG_ARP=m |
180 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 183 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
202 | CONFIG_IP_NF_ARP_MANGLE=m | 205 | CONFIG_IP_NF_ARP_MANGLE=m |
203 | CONFIG_NF_CONNTRACK_IPV6=m | 206 | CONFIG_NF_CONNTRACK_IPV6=m |
204 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 207 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
208 | CONFIG_NFT_DUP_IPV6=m | ||
205 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 209 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
206 | CONFIG_NFT_MASQ_IPV6=m | 210 | CONFIG_NFT_MASQ_IPV6=m |
207 | CONFIG_NFT_REDIR_IPV6=m | 211 | CONFIG_NFT_REDIR_IPV6=m |
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m | |||
267 | CONFIG_MPLS=y | 271 | CONFIG_MPLS=y |
268 | CONFIG_NET_MPLS_GSO=m | 272 | CONFIG_NET_MPLS_GSO=m |
269 | CONFIG_MPLS_ROUTING=m | 273 | CONFIG_MPLS_ROUTING=m |
274 | CONFIG_MPLS_IPTUNNEL=m | ||
270 | # CONFIG_WIRELESS is not set | 275 | # CONFIG_WIRELESS is not set |
271 | # CONFIG_UEVENT_HELPER is not set | 276 | # CONFIG_UEVENT_HELPER is not set |
272 | CONFIG_DEVTMPFS=y | 277 | CONFIG_DEVTMPFS=y |
@@ -354,6 +359,7 @@ CONFIG_NE2000=y | |||
354 | # CONFIG_NET_VENDOR_SEEQ is not set | 359 | # CONFIG_NET_VENDOR_SEEQ is not set |
355 | # CONFIG_NET_VENDOR_SMSC is not set | 360 | # CONFIG_NET_VENDOR_SMSC is not set |
356 | # CONFIG_NET_VENDOR_STMICRO is not set | 361 | # CONFIG_NET_VENDOR_STMICRO is not set |
362 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
357 | # CONFIG_NET_VENDOR_VIA is not set | 363 | # CONFIG_NET_VENDOR_VIA is not set |
358 | # CONFIG_NET_VENDOR_WIZNET is not set | 364 | # CONFIG_NET_VENDOR_WIZNET is not set |
359 | CONFIG_PLIP=m | 365 | CONFIG_PLIP=m |
@@ -510,6 +516,7 @@ CONFIG_TEST_USER_COPY=m | |||
510 | CONFIG_TEST_BPF=m | 516 | CONFIG_TEST_BPF=m |
511 | CONFIG_TEST_FIRMWARE=m | 517 | CONFIG_TEST_FIRMWARE=m |
512 | CONFIG_TEST_UDELAY=m | 518 | CONFIG_TEST_UDELAY=m |
519 | CONFIG_TEST_STATIC_KEYS=m | ||
513 | CONFIG_EARLY_PRINTK=y | 520 | CONFIG_EARLY_PRINTK=y |
514 | CONFIG_ENCRYPTED_KEYS=m | 521 | CONFIG_ENCRYPTED_KEYS=m |
515 | CONFIG_CRYPTO_RSA=m | 522 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 8442d267b877..9a4cab78a2ea 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
50 | CONFIG_NET_IPGRE=m | 51 | CONFIG_NET_IPGRE=m |
51 | CONFIG_NET_IPVTI=m | 52 | CONFIG_NET_IPVTI=m |
52 | CONFIG_NET_FOU_IP_TUNNELS=y | 53 | CONFIG_NET_FOU_IP_TUNNELS=y |
53 | CONFIG_GENEVE_CORE=m | ||
54 | CONFIG_INET_AH=m | 54 | CONFIG_INET_AH=m |
55 | CONFIG_INET_ESP=m | 55 | CONFIG_INET_ESP=m |
56 | CONFIG_INET_IPCOMP=m | 56 | CONFIG_INET_IPCOMP=m |
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
60 | # CONFIG_INET_LRO is not set | 60 | # CONFIG_INET_LRO is not set |
61 | CONFIG_INET_DIAG=m | 61 | CONFIG_INET_DIAG=m |
62 | CONFIG_INET_UDP_DIAG=m | 62 | CONFIG_INET_UDP_DIAG=m |
63 | CONFIG_IPV6=m | ||
63 | CONFIG_IPV6_ROUTER_PREF=y | 64 | CONFIG_IPV6_ROUTER_PREF=y |
64 | CONFIG_INET6_AH=m | 65 | CONFIG_INET6_AH=m |
65 | CONFIG_INET6_ESP=m | 66 | CONFIG_INET6_ESP=m |
66 | CONFIG_INET6_IPCOMP=m | 67 | CONFIG_INET6_IPCOMP=m |
68 | CONFIG_IPV6_ILA=m | ||
67 | CONFIG_IPV6_VTI=m | 69 | CONFIG_IPV6_VTI=m |
68 | CONFIG_IPV6_GRE=m | 70 | CONFIG_IPV6_GRE=m |
69 | CONFIG_NETFILTER=y | 71 | CONFIG_NETFILTER=y |
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
172 | CONFIG_IP_SET_LIST_SET=m | 174 | CONFIG_IP_SET_LIST_SET=m |
173 | CONFIG_NF_CONNTRACK_IPV4=m | 175 | CONFIG_NF_CONNTRACK_IPV4=m |
174 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 176 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
177 | CONFIG_NFT_DUP_IPV4=m | ||
175 | CONFIG_NF_TABLES_ARP=m | 178 | CONFIG_NF_TABLES_ARP=m |
176 | CONFIG_NF_LOG_ARP=m | 179 | CONFIG_NF_LOG_ARP=m |
177 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 180 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
199 | CONFIG_IP_NF_ARP_MANGLE=m | 202 | CONFIG_IP_NF_ARP_MANGLE=m |
200 | CONFIG_NF_CONNTRACK_IPV6=m | 203 | CONFIG_NF_CONNTRACK_IPV6=m |
201 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 204 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
205 | CONFIG_NFT_DUP_IPV6=m | ||
202 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 206 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
203 | CONFIG_NFT_MASQ_IPV6=m | 207 | CONFIG_NFT_MASQ_IPV6=m |
204 | CONFIG_NFT_REDIR_IPV6=m | 208 | CONFIG_NFT_REDIR_IPV6=m |
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m | |||
264 | CONFIG_MPLS=y | 268 | CONFIG_MPLS=y |
265 | CONFIG_NET_MPLS_GSO=m | 269 | CONFIG_NET_MPLS_GSO=m |
266 | CONFIG_MPLS_ROUTING=m | 270 | CONFIG_MPLS_ROUTING=m |
271 | CONFIG_MPLS_IPTUNNEL=m | ||
267 | # CONFIG_WIRELESS is not set | 272 | # CONFIG_WIRELESS is not set |
268 | # CONFIG_UEVENT_HELPER is not set | 273 | # CONFIG_UEVENT_HELPER is not set |
269 | CONFIG_DEVTMPFS=y | 274 | CONFIG_DEVTMPFS=y |
@@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y | |||
341 | # CONFIG_NET_VENDOR_SEEQ is not set | 346 | # CONFIG_NET_VENDOR_SEEQ is not set |
342 | # CONFIG_NET_VENDOR_STMICRO is not set | 347 | # CONFIG_NET_VENDOR_STMICRO is not set |
343 | # CONFIG_NET_VENDOR_SUN is not set | 348 | # CONFIG_NET_VENDOR_SUN is not set |
349 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
344 | # CONFIG_NET_VENDOR_VIA is not set | 350 | # CONFIG_NET_VENDOR_VIA is not set |
345 | # CONFIG_NET_VENDOR_WIZNET is not set | 351 | # CONFIG_NET_VENDOR_WIZNET is not set |
346 | CONFIG_PPP=m | 352 | CONFIG_PPP=m |
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m | |||
489 | CONFIG_TEST_BPF=m | 495 | CONFIG_TEST_BPF=m |
490 | CONFIG_TEST_FIRMWARE=m | 496 | CONFIG_TEST_FIRMWARE=m |
491 | CONFIG_TEST_UDELAY=m | 497 | CONFIG_TEST_UDELAY=m |
498 | CONFIG_TEST_STATIC_KEYS=m | ||
492 | CONFIG_ENCRYPTED_KEYS=m | 499 | CONFIG_ENCRYPTED_KEYS=m |
493 | CONFIG_CRYPTO_RSA=m | 500 | CONFIG_CRYPTO_RSA=m |
494 | CONFIG_CRYPTO_MANAGER=y | 501 | CONFIG_CRYPTO_MANAGER=y |
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 0e1b542e1555..1a2eaac13dbd 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
50 | CONFIG_NET_IPGRE=m | 51 | CONFIG_NET_IPGRE=m |
51 | CONFIG_NET_IPVTI=m | 52 | CONFIG_NET_IPVTI=m |
52 | CONFIG_NET_FOU_IP_TUNNELS=y | 53 | CONFIG_NET_FOU_IP_TUNNELS=y |
53 | CONFIG_GENEVE_CORE=m | ||
54 | CONFIG_INET_AH=m | 54 | CONFIG_INET_AH=m |
55 | CONFIG_INET_ESP=m | 55 | CONFIG_INET_ESP=m |
56 | CONFIG_INET_IPCOMP=m | 56 | CONFIG_INET_IPCOMP=m |
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
60 | # CONFIG_INET_LRO is not set | 60 | # CONFIG_INET_LRO is not set |
61 | CONFIG_INET_DIAG=m | 61 | CONFIG_INET_DIAG=m |
62 | CONFIG_INET_UDP_DIAG=m | 62 | CONFIG_INET_UDP_DIAG=m |
63 | CONFIG_IPV6=m | ||
63 | CONFIG_IPV6_ROUTER_PREF=y | 64 | CONFIG_IPV6_ROUTER_PREF=y |
64 | CONFIG_INET6_AH=m | 65 | CONFIG_INET6_AH=m |
65 | CONFIG_INET6_ESP=m | 66 | CONFIG_INET6_ESP=m |
66 | CONFIG_INET6_IPCOMP=m | 67 | CONFIG_INET6_IPCOMP=m |
68 | CONFIG_IPV6_ILA=m | ||
67 | CONFIG_IPV6_VTI=m | 69 | CONFIG_IPV6_VTI=m |
68 | CONFIG_IPV6_GRE=m | 70 | CONFIG_IPV6_GRE=m |
69 | CONFIG_NETFILTER=y | 71 | CONFIG_NETFILTER=y |
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
172 | CONFIG_IP_SET_LIST_SET=m | 174 | CONFIG_IP_SET_LIST_SET=m |
173 | CONFIG_NF_CONNTRACK_IPV4=m | 175 | CONFIG_NF_CONNTRACK_IPV4=m |
174 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 176 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
177 | CONFIG_NFT_DUP_IPV4=m | ||
175 | CONFIG_NF_TABLES_ARP=m | 178 | CONFIG_NF_TABLES_ARP=m |
176 | CONFIG_NF_LOG_ARP=m | 179 | CONFIG_NF_LOG_ARP=m |
177 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 180 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
199 | CONFIG_IP_NF_ARP_MANGLE=m | 202 | CONFIG_IP_NF_ARP_MANGLE=m |
200 | CONFIG_NF_CONNTRACK_IPV6=m | 203 | CONFIG_NF_CONNTRACK_IPV6=m |
201 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 204 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
205 | CONFIG_NFT_DUP_IPV6=m | ||
202 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 206 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
203 | CONFIG_NFT_MASQ_IPV6=m | 207 | CONFIG_NFT_MASQ_IPV6=m |
204 | CONFIG_NFT_REDIR_IPV6=m | 208 | CONFIG_NFT_REDIR_IPV6=m |
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m | |||
264 | CONFIG_MPLS=y | 268 | CONFIG_MPLS=y |
265 | CONFIG_NET_MPLS_GSO=m | 269 | CONFIG_NET_MPLS_GSO=m |
266 | CONFIG_MPLS_ROUTING=m | 270 | CONFIG_MPLS_ROUTING=m |
271 | CONFIG_MPLS_IPTUNNEL=m | ||
267 | # CONFIG_WIRELESS is not set | 272 | # CONFIG_WIRELESS is not set |
268 | # CONFIG_UEVENT_HELPER is not set | 273 | # CONFIG_UEVENT_HELPER is not set |
269 | CONFIG_DEVTMPFS=y | 274 | CONFIG_DEVTMPFS=y |
@@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y | |||
341 | # CONFIG_NET_VENDOR_SAMSUNG is not set | 346 | # CONFIG_NET_VENDOR_SAMSUNG is not set |
342 | # CONFIG_NET_VENDOR_SEEQ is not set | 347 | # CONFIG_NET_VENDOR_SEEQ is not set |
343 | # CONFIG_NET_VENDOR_STMICRO is not set | 348 | # CONFIG_NET_VENDOR_STMICRO is not set |
349 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
344 | # CONFIG_NET_VENDOR_VIA is not set | 350 | # CONFIG_NET_VENDOR_VIA is not set |
345 | # CONFIG_NET_VENDOR_WIZNET is not set | 351 | # CONFIG_NET_VENDOR_WIZNET is not set |
346 | CONFIG_PPP=m | 352 | CONFIG_PPP=m |
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m | |||
489 | CONFIG_TEST_BPF=m | 495 | CONFIG_TEST_BPF=m |
490 | CONFIG_TEST_FIRMWARE=m | 496 | CONFIG_TEST_FIRMWARE=m |
491 | CONFIG_TEST_UDELAY=m | 497 | CONFIG_TEST_UDELAY=m |
498 | CONFIG_TEST_STATIC_KEYS=m | ||
492 | CONFIG_EARLY_PRINTK=y | 499 | CONFIG_EARLY_PRINTK=y |
493 | CONFIG_ENCRYPTED_KEYS=m | 500 | CONFIG_ENCRYPTED_KEYS=m |
494 | CONFIG_CRYPTO_RSA=m | 501 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h index 81ca118d58af..a644f4a53b94 100644 --- a/arch/m68k/include/asm/irq.h +++ b/arch/m68k/include/asm/irq.h | |||
@@ -64,8 +64,7 @@ extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int, | |||
64 | struct pt_regs *)); | 64 | struct pt_regs *)); |
65 | extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt); | 65 | extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt); |
66 | extern void m68k_setup_irq_controller(struct irq_chip *, | 66 | extern void m68k_setup_irq_controller(struct irq_chip *, |
67 | void (*handle)(unsigned int irq, | 67 | void (*handle)(struct irq_desc *desc), |
68 | struct irq_desc *desc), | ||
69 | unsigned int irq, unsigned int cnt); | 68 | unsigned int irq, unsigned int cnt); |
70 | 69 | ||
71 | extern unsigned int irq_canonicalize(unsigned int irq); | 70 | extern unsigned int irq_canonicalize(unsigned int irq); |
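Note on the flow-handler prototype change above (and in the mac/via, oss, psc, baboon, alchemy, ath25 and ath79 handlers further down): chained handlers now take only the irq_desc pointer and, when they still need the parent IRQ number, recover it from the descriptor. A minimal sketch of a handler in the new style, assuming a hypothetical board driver — my_mux_irq, my_status_reg, MY_PARENT_IRQ and MY_FIRST_CHILD_IRQ are illustrative names, not part of this patch:

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/types.h>

#define MY_PARENT_IRQ		2	/* hypothetical parent line */
#define MY_FIRST_CHILD_IRQ	64	/* hypothetical child IRQ base */

static void __iomem *my_status_reg;	/* hypothetical mux status register */

static void my_mux_irq(struct irq_desc *desc)	/* was (unsigned int irq, struct irq_desc *desc) */
{
	/* handlers that still need the number call irq_desc_get_irq(desc) */
	u32 pending = __raw_readl(my_status_reg);

	while (pending) {
		int bit = __ffs(pending);

		generic_handle_irq(MY_FIRST_CHILD_IRQ + bit);
		pending &= ~BIT(bit);
	}
}

static void __init my_mux_irq_init(void)
{
	/* registration itself is unchanged */
	irq_set_chained_handler(MY_PARENT_IRQ, my_mux_irq);
}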
diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h index 5a822bb790f7..066e74f666ae 100644 --- a/arch/m68k/include/asm/linkage.h +++ b/arch/m68k/include/asm/linkage.h | |||
@@ -4,4 +4,34 @@ | |||
4 | #define __ALIGN .align 4 | 4 | #define __ALIGN .align 4 |
5 | #define __ALIGN_STR ".align 4" | 5 | #define __ALIGN_STR ".align 4" |
6 | 6 | ||
7 | /* | ||
8 | * Make sure the compiler doesn't do anything stupid with the | ||
9 | * arguments on the stack - they are owned by the *caller*, not | ||
10 | * the callee. This just fools gcc into not spilling into them, | ||
11 | * and keeps it from doing tailcall recursion and/or using the | ||
12 | * stack slots for temporaries, since they are live and "used" | ||
13 | * all the way to the end of the function. | ||
14 | */ | ||
15 | #define asmlinkage_protect(n, ret, args...) \ | ||
16 | __asmlinkage_protect##n(ret, ##args) | ||
17 | #define __asmlinkage_protect_n(ret, args...) \ | ||
18 | __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args) | ||
19 | #define __asmlinkage_protect0(ret) \ | ||
20 | __asmlinkage_protect_n(ret) | ||
21 | #define __asmlinkage_protect1(ret, arg1) \ | ||
22 | __asmlinkage_protect_n(ret, "m" (arg1)) | ||
23 | #define __asmlinkage_protect2(ret, arg1, arg2) \ | ||
24 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) | ||
25 | #define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ | ||
26 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) | ||
27 | #define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ | ||
28 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ | ||
29 | "m" (arg4)) | ||
30 | #define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ | ||
31 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ | ||
32 | "m" (arg4), "m" (arg5)) | ||
33 | #define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ | ||
34 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ | ||
35 | "m" (arg4), "m" (arg5), "m" (arg6)) | ||
36 | |||
7 | #endif | 37 | #endif |
diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h index fe3fc9ae1b69..53c632c85b03 100644 --- a/arch/m68k/include/asm/mac_via.h +++ b/arch/m68k/include/asm/mac_via.h | |||
@@ -261,7 +261,7 @@ extern void via_irq_enable(int); | |||
261 | extern void via_irq_disable(int); | 261 | extern void via_irq_disable(int); |
262 | extern void via_nubus_irq_startup(int irq); | 262 | extern void via_nubus_irq_startup(int irq); |
263 | extern void via_nubus_irq_shutdown(int irq); | 263 | extern void via_nubus_irq_shutdown(int irq); |
264 | extern void via1_irq(unsigned int irq, struct irq_desc *desc); | 264 | extern void via1_irq(struct irq_desc *desc); |
265 | extern void via1_set_head(int); | 265 | extern void via1_set_head(int); |
266 | extern int via2_scsi_drq_pending(void); | 266 | extern int via2_scsi_drq_pending(void); |
267 | 267 | ||
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 244e0dbe45db..0793a7f17417 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <uapi/asm/unistd.h> | 4 | #include <uapi/asm/unistd.h> |
5 | 5 | ||
6 | 6 | ||
7 | #define NR_syscalls 356 | 7 | #define NR_syscalls 375 |
8 | 8 | ||
9 | #define __ARCH_WANT_OLD_READDIR | 9 | #define __ARCH_WANT_OLD_READDIR |
10 | #define __ARCH_WANT_OLD_STAT | 10 | #define __ARCH_WANT_OLD_STAT |
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 61fb6cb9d2ae..5e6fae6c275f 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h | |||
@@ -361,5 +361,24 @@ | |||
361 | #define __NR_memfd_create 353 | 361 | #define __NR_memfd_create 353 |
362 | #define __NR_bpf 354 | 362 | #define __NR_bpf 354 |
363 | #define __NR_execveat 355 | 363 | #define __NR_execveat 355 |
364 | #define __NR_socket 356 | ||
365 | #define __NR_socketpair 357 | ||
366 | #define __NR_bind 358 | ||
367 | #define __NR_connect 359 | ||
368 | #define __NR_listen 360 | ||
369 | #define __NR_accept4 361 | ||
370 | #define __NR_getsockopt 362 | ||
371 | #define __NR_setsockopt 363 | ||
372 | #define __NR_getsockname 364 | ||
373 | #define __NR_getpeername 365 | ||
374 | #define __NR_sendto 366 | ||
375 | #define __NR_sendmsg 367 | ||
376 | #define __NR_recvfrom 368 | ||
377 | #define __NR_recvmsg 369 | ||
378 | #define __NR_shutdown 370 | ||
379 | #define __NR_recvmmsg 371 | ||
380 | #define __NR_sendmmsg 372 | ||
381 | #define __NR_userfaultfd 373 | ||
382 | #define __NR_membarrier 374 | ||
364 | 383 | ||
365 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ | 384 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ |
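The nineteen new numbers (356 through 374) account for the NR_syscalls bump from 356 to 375 in the hunk above: 356–372 expose the socket family as direct syscalls (previously reached on m68k through the socketcall() multiplexer), and 373/374 wire up userfaultfd and membarrier; the matching .long entries follow in syscalltable.S below. A userspace sketch — not part of the patch — probing one of the new numbers with syscall(2); the fallback define simply repeats the value from this hunk for older headers:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_membarrier
#define __NR_membarrier 374		/* value from the hunk above */
#endif

int main(void)
{
	/* MEMBARRIER_CMD_QUERY (0) returns a bitmask of supported commands */
	long mask = syscall(__NR_membarrier, 0, 0);

	if (mask < 0)
		perror("membarrier");
	else
		printf("supported membarrier commands: 0x%lx\n", (unsigned long)mask);
	return 0;
}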
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index a0ec4303f2c8..5dd0e80042f5 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
@@ -376,4 +376,22 @@ ENTRY(sys_call_table) | |||
376 | .long sys_memfd_create | 376 | .long sys_memfd_create |
377 | .long sys_bpf | 377 | .long sys_bpf |
378 | .long sys_execveat /* 355 */ | 378 | .long sys_execveat /* 355 */ |
379 | 379 | .long sys_socket | |
380 | .long sys_socketpair | ||
381 | .long sys_bind | ||
382 | .long sys_connect | ||
383 | .long sys_listen /* 360 */ | ||
384 | .long sys_accept4 | ||
385 | .long sys_getsockopt | ||
386 | .long sys_setsockopt | ||
387 | .long sys_getsockname | ||
388 | .long sys_getpeername /* 365 */ | ||
389 | .long sys_sendto | ||
390 | .long sys_sendmsg | ||
391 | .long sys_recvfrom | ||
392 | .long sys_recvmsg | ||
393 | .long sys_shutdown /* 370 */ | ||
394 | .long sys_recvmmsg | ||
395 | .long sys_sendmmsg | ||
396 | .long sys_userfaultfd | ||
397 | .long sys_membarrier | ||
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c index 3fe0e43d44f6..f6f7d42713ec 100644 --- a/arch/m68k/mac/baboon.c +++ b/arch/m68k/mac/baboon.c | |||
@@ -45,7 +45,7 @@ void __init baboon_init(void) | |||
45 | * Baboon interrupt handler. This works a lot like a VIA. | 45 | * Baboon interrupt handler. This works a lot like a VIA. |
46 | */ | 46 | */ |
47 | 47 | ||
48 | static void baboon_irq(unsigned int irq, struct irq_desc *desc) | 48 | static void baboon_irq(struct irq_desc *desc) |
49 | { | 49 | { |
50 | int irq_bit, irq_num; | 50 | int irq_bit, irq_num; |
51 | unsigned char events; | 51 | unsigned char events; |
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c index 191610d97689..55d6592783f5 100644 --- a/arch/m68k/mac/oss.c +++ b/arch/m68k/mac/oss.c | |||
@@ -63,7 +63,7 @@ void __init oss_nubus_init(void) | |||
63 | * Handle miscellaneous OSS interrupts. | 63 | * Handle miscellaneous OSS interrupts. |
64 | */ | 64 | */ |
65 | 65 | ||
66 | static void oss_irq(unsigned int __irq, struct irq_desc *desc) | 66 | static void oss_irq(struct irq_desc *desc) |
67 | { | 67 | { |
68 | int events = oss->irq_pending & | 68 | int events = oss->irq_pending & |
69 | (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM); | 69 | (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM); |
@@ -99,7 +99,7 @@ static void oss_irq(unsigned int __irq, struct irq_desc *desc) | |||
99 | * Unlike the VIA/RBV this is on its own autovector interrupt level. | 99 | * Unlike the VIA/RBV this is on its own autovector interrupt level. |
100 | */ | 100 | */ |
101 | 101 | ||
102 | static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc) | 102 | static void oss_nubus_irq(struct irq_desc *desc) |
103 | { | 103 | { |
104 | int events, irq_bit, i; | 104 | int events, irq_bit, i; |
105 | 105 | ||
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c index 3b9e302e7a37..cd38f29955c8 100644 --- a/arch/m68k/mac/psc.c +++ b/arch/m68k/mac/psc.c | |||
@@ -113,7 +113,7 @@ void __init psc_init(void) | |||
113 | * PSC interrupt handler. It's a lot like the VIA interrupt handler. | 113 | * PSC interrupt handler. It's a lot like the VIA interrupt handler. |
114 | */ | 114 | */ |
115 | 115 | ||
116 | static void psc_irq(unsigned int __irq, struct irq_desc *desc) | 116 | static void psc_irq(struct irq_desc *desc) |
117 | { | 117 | { |
118 | unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc); | 118 | unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc); |
119 | unsigned int irq = irq_desc_get_irq(desc); | 119 | unsigned int irq = irq_desc_get_irq(desc); |
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index e198dec868e4..ce56e04386e7 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c | |||
@@ -446,7 +446,7 @@ void via_nubus_irq_shutdown(int irq) | |||
446 | * via6522.c :-), disable/pending masks added. | 446 | * via6522.c :-), disable/pending masks added. |
447 | */ | 447 | */ |
448 | 448 | ||
449 | void via1_irq(unsigned int irq, struct irq_desc *desc) | 449 | void via1_irq(struct irq_desc *desc) |
450 | { | 450 | { |
451 | int irq_num; | 451 | int irq_num; |
452 | unsigned char irq_bit, events; | 452 | unsigned char irq_bit, events; |
@@ -467,7 +467,7 @@ void via1_irq(unsigned int irq, struct irq_desc *desc) | |||
467 | } while (events >= irq_bit); | 467 | } while (events >= irq_bit); |
468 | } | 468 | } |
469 | 469 | ||
470 | static void via2_irq(unsigned int irq, struct irq_desc *desc) | 470 | static void via2_irq(struct irq_desc *desc) |
471 | { | 471 | { |
472 | int irq_num; | 472 | int irq_num; |
473 | unsigned char irq_bit, events; | 473 | unsigned char irq_bit, events; |
@@ -493,7 +493,7 @@ static void via2_irq(unsigned int irq, struct irq_desc *desc) | |||
493 | * VIA2 dispatcher as a fast interrupt handler. | 493 | * VIA2 dispatcher as a fast interrupt handler. |
494 | */ | 494 | */ |
495 | 495 | ||
496 | void via_nubus_irq(unsigned int irq, struct irq_desc *desc) | 496 | static void via_nubus_irq(struct irq_desc *desc) |
497 | { | 497 | { |
498 | int slot_irq; | 498 | int slot_irq; |
499 | unsigned char slot_bit, events; | 499 | unsigned char slot_bit, events; |
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index df31353fd200..29acb89daaaa 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild | |||
@@ -54,4 +54,5 @@ generic-y += ucontext.h | |||
54 | generic-y += unaligned.h | 54 | generic-y += unaligned.h |
55 | generic-y += user.h | 55 | generic-y += user.h |
56 | generic-y += vga.h | 56 | generic-y += vga.h |
57 | generic-y += word-at-a-time.h | ||
57 | generic-y += xor.h | 58 | generic-y += xor.h |
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c index a336094a7a6c..3074b64793e6 100644 --- a/arch/metag/kernel/irq.c +++ b/arch/metag/kernel/irq.c | |||
@@ -94,13 +94,11 @@ void do_IRQ(int irq, struct pt_regs *regs) | |||
94 | "MOV D0.5,%0\n" | 94 | "MOV D0.5,%0\n" |
95 | "MOV D1Ar1,%1\n" | 95 | "MOV D1Ar1,%1\n" |
96 | "MOV D1RtP,%2\n" | 96 | "MOV D1RtP,%2\n" |
97 | "MOV D0Ar2,%3\n" | ||
98 | "SWAP A0StP,D0.5\n" | 97 | "SWAP A0StP,D0.5\n" |
99 | "SWAP PC,D1RtP\n" | 98 | "SWAP PC,D1RtP\n" |
100 | "MOV A0StP,D0.5\n" | 99 | "MOV A0StP,D0.5\n" |
101 | : | 100 | : |
102 | : "r" (isp), "r" (irq), "r" (desc->handle_irq), | 101 | : "r" (isp), "r" (desc), "r" (desc->handle_irq) |
103 | "r" (desc) | ||
104 | : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4", | 102 | : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4", |
105 | "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP", | 103 | "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP", |
106 | "D0.5" | 104 | "D0.5" |
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 2f222f355c4b..b0ae88c9fed9 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild | |||
@@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h | |||
10 | generic-y += preempt.h | 10 | generic-y += preempt.h |
11 | generic-y += syscalls.h | 11 | generic-y += syscalls.h |
12 | generic-y += trace_clock.h | 12 | generic-y += trace_clock.h |
13 | generic-y += word-at-a-time.h | ||
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 6b8b75266801..ae838ed5fcf2 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c | |||
@@ -863,7 +863,14 @@ void pcibios_setup_bus_devices(struct pci_bus *bus) | |||
863 | 863 | ||
864 | void pcibios_fixup_bus(struct pci_bus *bus) | 864 | void pcibios_fixup_bus(struct pci_bus *bus) |
865 | { | 865 | { |
866 | /* Fixup the bus */ | 866 | /* When called from the generic PCI probe, read PCI<->PCI bridge |
867 | * bases. This is -not- called when generating the PCI tree from | ||
868 | * the OF device-tree. | ||
869 | */ | ||
870 | if (bus->self != NULL) | ||
871 | pci_read_bridge_bases(bus); | ||
872 | |||
873 | /* Now fixup the bus bus */ | ||
867 | pcibios_setup_bus_self(bus); | 874 | pcibios_setup_bus_self(bus); |
868 | 875 | ||
869 | /* Now fixup devices on that bus */ | 876 | /* Now fixup devices on that bus */ |
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c index 4c496c50edf6..da9f9220048f 100644 --- a/arch/mips/alchemy/common/irq.c +++ b/arch/mips/alchemy/common/irq.c | |||
@@ -851,7 +851,7 @@ static struct syscore_ops alchemy_gpic_pmops = { | |||
851 | 851 | ||
852 | /* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */ | 852 | /* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */ |
853 | #define DISP(name, base, addr) \ | 853 | #define DISP(name, base, addr) \ |
854 | static void au1000_##name##_dispatch(unsigned int irq, struct irq_desc *d) \ | 854 | static void au1000_##name##_dispatch(struct irq_desc *d) \ |
855 | { \ | 855 | { \ |
856 | unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \ | 856 | unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \ |
857 | if (likely(r)) \ | 857 | if (likely(r)) \ |
@@ -865,7 +865,7 @@ DISP(ic0r1, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ1INT) | |||
865 | DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT) | 865 | DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT) |
866 | DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT) | 866 | DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT) |
867 | 867 | ||
868 | static void alchemy_gpic_dispatch(unsigned int irq, struct irq_desc *d) | 868 | static void alchemy_gpic_dispatch(struct irq_desc *d) |
869 | { | 869 | { |
870 | int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC); | 870 | int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC); |
871 | generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i); | 871 | generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i); |
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c index 324ad72d7c36..faeddf119fd4 100644 --- a/arch/mips/alchemy/devboards/bcsr.c +++ b/arch/mips/alchemy/devboards/bcsr.c | |||
@@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(bcsr_mod); | |||
86 | /* | 86 | /* |
87 | * DB1200/PB1200 CPLD IRQ muxer | 87 | * DB1200/PB1200 CPLD IRQ muxer |
88 | */ | 88 | */ |
89 | static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d) | 89 | static void bcsr_csc_handler(struct irq_desc *d) |
90 | { | 90 | { |
91 | unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT); | 91 | unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT); |
92 | struct irq_chip *chip = irq_desc_get_chip(d); | 92 | struct irq_chip *chip = irq_desc_get_chip(d); |
diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c index ec9a371f1e62..8da996142d6a 100644 --- a/arch/mips/ath25/ar2315.c +++ b/arch/mips/ath25/ar2315.c | |||
@@ -69,7 +69,7 @@ static struct irqaction ar2315_ahb_err_interrupt = { | |||
69 | .name = "ar2315-ahb-error", | 69 | .name = "ar2315-ahb-error", |
70 | }; | 70 | }; |
71 | 71 | ||
72 | static void ar2315_misc_irq_handler(unsigned irq, struct irq_desc *desc) | 72 | static void ar2315_misc_irq_handler(struct irq_desc *desc) |
73 | { | 73 | { |
74 | u32 pending = ar2315_rst_reg_read(AR2315_ISR) & | 74 | u32 pending = ar2315_rst_reg_read(AR2315_ISR) & |
75 | ar2315_rst_reg_read(AR2315_IMR); | 75 | ar2315_rst_reg_read(AR2315_IMR); |
diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c index e63e38fa4880..acd55a9cffe3 100644 --- a/arch/mips/ath25/ar5312.c +++ b/arch/mips/ath25/ar5312.c | |||
@@ -73,7 +73,7 @@ static struct irqaction ar5312_ahb_err_interrupt = { | |||
73 | .name = "ar5312-ahb-error", | 73 | .name = "ar5312-ahb-error", |
74 | }; | 74 | }; |
75 | 75 | ||
76 | static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc) | 76 | static void ar5312_misc_irq_handler(struct irq_desc *desc) |
77 | { | 77 | { |
78 | u32 pending = ar5312_rst_reg_read(AR5312_ISR) & | 78 | u32 pending = ar5312_rst_reg_read(AR5312_ISR) & |
79 | ar5312_rst_reg_read(AR5312_IMR); | 79 | ar5312_rst_reg_read(AR5312_IMR); |
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c index 807132b838b2..eeb3953ed8ac 100644 --- a/arch/mips/ath79/irq.c +++ b/arch/mips/ath79/irq.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #include "common.h" | 26 | #include "common.h" |
27 | #include "machtypes.h" | 27 | #include "machtypes.h" |
28 | 28 | ||
29 | static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc) | 29 | static void ath79_misc_irq_handler(struct irq_desc *desc) |
30 | { | 30 | { |
31 | void __iomem *base = ath79_reset_base; | 31 | void __iomem *base = ath79_reset_base; |
32 | u32 pending; | 32 | u32 pending; |
@@ -119,7 +119,7 @@ static void __init ath79_misc_irq_init(void) | |||
119 | irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler); | 119 | irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler); |
120 | } | 120 | } |
121 | 121 | ||
122 | static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) | 122 | static void ar934x_ip2_irq_dispatch(struct irq_desc *desc) |
123 | { | 123 | { |
124 | u32 status; | 124 | u32 status; |
125 | 125 | ||
@@ -148,7 +148,7 @@ static void ar934x_ip2_irq_init(void) | |||
148 | irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch); | 148 | irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch); |
149 | } | 149 | } |
150 | 150 | ||
151 | static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) | 151 | static void qca955x_ip2_irq_dispatch(struct irq_desc *desc) |
152 | { | 152 | { |
153 | u32 status; | 153 | u32 status; |
154 | 154 | ||
@@ -171,7 +171,7 @@ static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) | |||
171 | } | 171 | } |
172 | } | 172 | } |
173 | 173 | ||
174 | static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc) | 174 | static void qca955x_ip3_irq_dispatch(struct irq_desc *desc) |
175 | { | 175 | { |
176 | u32 status; | 176 | u32 status; |
177 | 177 | ||
@@ -293,8 +293,26 @@ static int __init ath79_misc_intc_of_init( | |||
293 | 293 | ||
294 | return 0; | 294 | return 0; |
295 | } | 295 | } |
296 | IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc", | 296 | |
297 | ath79_misc_intc_of_init); | 297 | static int __init ar7100_misc_intc_of_init( |
298 | struct device_node *node, struct device_node *parent) | ||
299 | { | ||
300 | ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; | ||
301 | return ath79_misc_intc_of_init(node, parent); | ||
302 | } | ||
303 | |||
304 | IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc", | ||
305 | ar7100_misc_intc_of_init); | ||
306 | |||
307 | static int __init ar7240_misc_intc_of_init( | ||
308 | struct device_node *node, struct device_node *parent) | ||
309 | { | ||
310 | ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; | ||
311 | return ath79_misc_intc_of_init(node, parent); | ||
312 | } | ||
313 | |||
314 | IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc", | ||
315 | ar7240_misc_intc_of_init); | ||
298 | 316 | ||
299 | static int __init ar79_cpu_intc_of_init( | 317 | static int __init ar79_cpu_intc_of_init( |
300 | struct device_node *node, struct device_node *parent) | 318 | struct device_node *node, struct device_node *parent) |
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index f26c3c661cca..0352bc8d56b3 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c | |||
@@ -2221,7 +2221,7 @@ static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data) | |||
2221 | if (irqd_get_trigger_type(irq_data) & | 2221 | if (irqd_get_trigger_type(irq_data) & |
2222 | IRQ_TYPE_EDGE_BOTH) | 2222 | IRQ_TYPE_EDGE_BOTH) |
2223 | cvmx_write_csr(host_data->raw_reg, 1ull << i); | 2223 | cvmx_write_csr(host_data->raw_reg, 1ull << i); |
2224 | generic_handle_irq_desc(irq, desc); | 2224 | generic_handle_irq_desc(desc); |
2225 | } | 2225 | } |
2226 | } | 2226 | } |
2227 | 2227 | ||
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 89a628455bc2..bd634259eab9 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -933,7 +933,7 @@ void __init plat_mem_setup(void) | |||
933 | while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) | 933 | while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) |
934 | && (total < MAX_MEMORY)) { | 934 | && (total < MAX_MEMORY)) { |
935 | memory = cvmx_bootmem_phy_alloc(mem_alloc_size, | 935 | memory = cvmx_bootmem_phy_alloc(mem_alloc_size, |
936 | __pa_symbol(&__init_end), -1, | 936 | __pa_symbol(&_end), -1, |
937 | 0x100000, | 937 | 0x100000, |
938 | CVMX_BOOTMEM_FLAG_NO_LOCKING); | 938 | CVMX_BOOTMEM_FLAG_NO_LOCKING); |
939 | if (memory >= 0) { | 939 | if (memory >= 0) { |
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 40ec4ca3f946..c7fe4d01e79c 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild | |||
@@ -17,4 +17,5 @@ generic-y += segment.h | |||
17 | generic-y += serial.h | 17 | generic-y += serial.h |
18 | generic-y += trace_clock.h | 18 | generic-y += trace_clock.h |
19 | generic-y += user.h | 19 | generic-y += user.h |
20 | generic-y += word-at-a-time.h | ||
20 | generic-y += xor.h | 21 | generic-y += xor.h |
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 9801ac982655..fe67f12ac239 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h | |||
@@ -20,6 +20,9 @@ | |||
20 | #ifndef cpu_has_tlb | 20 | #ifndef cpu_has_tlb |
21 | #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) | 21 | #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) |
22 | #endif | 22 | #endif |
23 | #ifndef cpu_has_ftlb | ||
24 | #define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB) | ||
25 | #endif | ||
23 | #ifndef cpu_has_tlbinv | 26 | #ifndef cpu_has_tlbinv |
24 | #define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) | 27 | #define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) |
25 | #endif | 28 | #endif |
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index cd89e9855775..82ad15f11049 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h | |||
@@ -385,6 +385,7 @@ enum cpu_type_enum { | |||
385 | #define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */ | 385 | #define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */ |
386 | #define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */ | 386 | #define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */ |
387 | #define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */ | 387 | #define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */ |
388 | #define MIPS_CPU_FTLB 0x20000000000ull /* CPU has Fixed-page-size TLB */ | ||
388 | 389 | ||
389 | /* | 390 | /* |
390 | * CPU ASE encodings | 391 | * CPU ASE encodings |
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 9e777cd42b67..d10fd80dbb7e 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h | |||
@@ -256,6 +256,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si | |||
256 | */ | 256 | */ |
257 | #define ioremap_nocache(offset, size) \ | 257 | #define ioremap_nocache(offset, size) \ |
258 | __ioremap_mode((offset), (size), _CACHE_UNCACHED) | 258 | __ioremap_mode((offset), (size), _CACHE_UNCACHED) |
259 | #define ioremap_uc ioremap_nocache | ||
259 | 260 | ||
260 | /* | 261 | /* |
261 | * ioremap_cachable - map bus memory into CPU space | 262 | * ioremap_cachable - map bus memory into CPU space |
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index e8c8d9d0c45f..5a1a882e0a75 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h | |||
@@ -61,6 +61,7 @@ | |||
61 | #define KVM_PRIVATE_MEM_SLOTS 0 | 61 | #define KVM_PRIVATE_MEM_SLOTS 0 |
62 | 62 | ||
63 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 63 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
64 | #define KVM_HALT_POLL_NS_DEFAULT 500000 | ||
64 | 65 | ||
65 | 66 | ||
66 | 67 | ||
@@ -128,6 +129,7 @@ struct kvm_vcpu_stat { | |||
128 | u32 msa_disabled_exits; | 129 | u32 msa_disabled_exits; |
129 | u32 flush_dcache_exits; | 130 | u32 flush_dcache_exits; |
130 | u32 halt_successful_poll; | 131 | u32 halt_successful_poll; |
132 | u32 halt_attempted_poll; | ||
131 | u32 halt_wakeup; | 133 | u32 halt_wakeup; |
132 | }; | 134 | }; |
133 | 135 | ||
diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h index b02891f9caaf..21d9607c80d7 100644 --- a/arch/mips/include/asm/maar.h +++ b/arch/mips/include/asm/maar.h | |||
@@ -66,6 +66,15 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower, | |||
66 | } | 66 | } |
67 | 67 | ||
68 | /** | 68 | /** |
69 | * maar_init() - initialise MAARs | ||
70 | * | ||
71 | * Performs initialisation of MAARs for the current CPU, making use of the | ||
72 | * platform's implementation of platform_maar_init where necessary and | ||
73 | * duplicating the setup it provides on secondary CPUs. | ||
74 | */ | ||
75 | extern void maar_init(void); | ||
76 | |||
77 | /** | ||
69 | * struct maar_config - MAAR configuration data | 78 | * struct maar_config - MAAR configuration data |
70 | * @lower: The lowest address that the MAAR pair will affect. Must be | 79 | * @lower: The lowest address that the MAAR pair will affect. Must be |
71 | * aligned to a 2^16 byte boundary. | 80 | * aligned to a 2^16 byte boundary. |
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index d75b75e78ebb..1f1927ab4269 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h | |||
@@ -194,6 +194,7 @@ BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8) | |||
194 | BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) | 194 | BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) |
195 | BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) | 195 | BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) |
196 | BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) | 196 | BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) |
197 | BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150) | ||
197 | 198 | ||
198 | /* Core Local & Core Other register accessor functions */ | 199 | /* Core Local & Core Other register accessor functions */ |
199 | BUILD_CM_Cx_RW(reset_release, 0x00) | 200 | BUILD_CM_Cx_RW(reset_release, 0x00) |
@@ -316,6 +317,10 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80) | |||
316 | #define CM_GCR_L2_CONFIG_ASSOC_SHF 0 | 317 | #define CM_GCR_L2_CONFIG_ASSOC_SHF 0 |
317 | #define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) | 318 | #define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) |
318 | 319 | ||
320 | /* GCR_SYS_CONFIG2 register fields */ | ||
321 | #define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0 | ||
322 | #define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0) | ||
323 | |||
319 | /* GCR_Cx_COHERENCE register fields */ | 324 | /* GCR_Cx_COHERENCE register fields */ |
320 | #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 | 325 | #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 |
321 | #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) | 326 | #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) |
@@ -405,4 +410,38 @@ static inline int mips_cm_revision(void) | |||
405 | return read_gcr_rev(); | 410 | return read_gcr_rev(); |
406 | } | 411 | } |
407 | 412 | ||
413 | /** | ||
414 | * mips_cm_max_vp_width() - return the width in bits of VP indices | ||
415 | * | ||
416 | * Return: the width, in bits, of VP indices in fields that combine core & VP | ||
417 | * indices. | ||
418 | */ | ||
419 | static inline unsigned int mips_cm_max_vp_width(void) | ||
420 | { | ||
421 | extern int smp_num_siblings; | ||
422 | |||
423 | if (mips_cm_revision() >= CM_REV_CM3) | ||
424 | return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; | ||
425 | |||
426 | return smp_num_siblings; | ||
427 | } | ||
428 | |||
429 | /** | ||
430 | * mips_cm_vp_id() - calculate the hardware VP ID for a CPU | ||
431 | * @cpu: the CPU whose VP ID to calculate | ||
432 | * | ||
433 | * Hardware such as the GIC uses identifiers for VPs which may not match the | ||
434 | * CPU numbers used by Linux. This function calculates the hardware VP | ||
435 | * identifier corresponding to a given CPU. | ||
436 | * | ||
437 | * Return: the VP ID for the CPU. | ||
438 | */ | ||
439 | static inline unsigned int mips_cm_vp_id(unsigned int cpu) | ||
440 | { | ||
441 | unsigned int core = cpu_data[cpu].core; | ||
442 | unsigned int vp = cpu_vpe_id(&cpu_data[cpu]); | ||
443 | |||
444 | return (core * mips_cm_max_vp_width()) + vp; | ||
445 | } | ||
446 | |||
408 | #endif /* __MIPS_ASM_MIPS_CM_H__ */ | 447 | #endif /* __MIPS_ASM_MIPS_CM_H__ */ |
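The new mips_cm_vp_id() helper linearises (core, VP) pairs into the identifiers hardware such as the GIC expects, using either GCR_SYS_CONFIG2 (CM3 and later) or smp_num_siblings for the VP width. A standalone sketch of the arithmetic, with made-up width and core/VP values:

#include <stdio.h>

static unsigned int vp_id(unsigned int core, unsigned int vp,
                          unsigned int max_vp_width)
{
        return core * max_vp_width + vp;        /* mirrors mips_cm_vp_id() */
}

int main(void)
{
        /* example: width 4, core 2, VP 1 -> hardware VP ID 9 */
        printf("core 2, vp 1 -> hw vp id %u\n", vp_id(2, 1, 4));
        return 0;
}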
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index d3cd8eac81e3..c64781cf649f 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h | |||
@@ -487,6 +487,8 @@ | |||
487 | 487 | ||
488 | /* Bits specific to the MIPS32/64 PRA. */ | 488 | /* Bits specific to the MIPS32/64 PRA. */ |
489 | #define MIPS_CONF_MT (_ULCAST_(7) << 7) | 489 | #define MIPS_CONF_MT (_ULCAST_(7) << 7) |
490 | #define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7) | ||
491 | #define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7) | ||
490 | #define MIPS_CONF_AR (_ULCAST_(7) << 10) | 492 | #define MIPS_CONF_AR (_ULCAST_(7) << 10) |
491 | #define MIPS_CONF_AT (_ULCAST_(3) << 13) | 493 | #define MIPS_CONF_AT (_ULCAST_(3) << 13) |
492 | #define MIPS_CONF_M (_ULCAST_(1) << 31) | 494 | #define MIPS_CONF_M (_ULCAST_(1) << 31) |
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index 2a4c128277e4..be52c2125d71 100644 --- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h | |||
@@ -57,8 +57,8 @@ | |||
57 | #include <asm/mach-netlogic/multi-node.h> | 57 | #include <asm/mach-netlogic/multi-node.h> |
58 | 58 | ||
59 | struct irq_desc; | 59 | struct irq_desc; |
60 | void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc); | 60 | void nlm_smp_function_ipi_handler(struct irq_desc *desc); |
61 | void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc); | 61 | void nlm_smp_resched_ipi_handler(struct irq_desc *desc); |
62 | void nlm_smp_irq_init(int hwcpuid); | 62 | void nlm_smp_irq_init(int hwcpuid); |
63 | void nlm_boot_secondary_cpus(void); | 63 | void nlm_boot_secondary_cpus(void); |
64 | int nlm_wakeup_secondary_cpus(void); | 64 | int nlm_wakeup_secondary_cpus(void); |
diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h index c4ddc4f0d2dc..23cd9b118c9e 100644 --- a/arch/mips/include/uapi/asm/swab.h +++ b/arch/mips/include/uapi/asm/swab.h | |||
@@ -13,16 +13,15 @@ | |||
13 | 13 | ||
14 | #define __SWAB_64_THRU_32__ | 14 | #define __SWAB_64_THRU_32__ |
15 | 15 | ||
16 | #if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ | 16 | #if !defined(__mips16) && \ |
17 | defined(_MIPS_ARCH_LOONGSON3A) | 17 | ((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ |
18 | defined(_MIPS_ARCH_LOONGSON3A)) | ||
18 | 19 | ||
19 | static inline __attribute__((nomips16)) __attribute_const__ | 20 | static inline __attribute_const__ __u16 __arch_swab16(__u16 x) |
20 | __u16 __arch_swab16(__u16 x) | ||
21 | { | 21 | { |
22 | __asm__( | 22 | __asm__( |
23 | " .set push \n" | 23 | " .set push \n" |
24 | " .set arch=mips32r2 \n" | 24 | " .set arch=mips32r2 \n" |
25 | " .set nomips16 \n" | ||
26 | " wsbh %0, %1 \n" | 25 | " wsbh %0, %1 \n" |
27 | " .set pop \n" | 26 | " .set pop \n" |
28 | : "=r" (x) | 27 | : "=r" (x) |
@@ -32,13 +31,11 @@ static inline __attribute__((nomips16)) __attribute_const__ | |||
32 | } | 31 | } |
33 | #define __arch_swab16 __arch_swab16 | 32 | #define __arch_swab16 __arch_swab16 |
34 | 33 | ||
35 | static inline __attribute__((nomips16)) __attribute_const__ | 34 | static inline __attribute_const__ __u32 __arch_swab32(__u32 x) |
36 | __u32 __arch_swab32(__u32 x) | ||
37 | { | 35 | { |
38 | __asm__( | 36 | __asm__( |
39 | " .set push \n" | 37 | " .set push \n" |
40 | " .set arch=mips32r2 \n" | 38 | " .set arch=mips32r2 \n" |
41 | " .set nomips16 \n" | ||
42 | " wsbh %0, %1 \n" | 39 | " wsbh %0, %1 \n" |
43 | " rotr %0, %0, 16 \n" | 40 | " rotr %0, %0, 16 \n" |
44 | " .set pop \n" | 41 | " .set pop \n" |
@@ -54,13 +51,11 @@ static inline __attribute__((nomips16)) __attribute_const__ | |||
54 | * 64-bit kernel on r2 CPUs. | 51 | * 64-bit kernel on r2 CPUs. |
55 | */ | 52 | */ |
56 | #ifdef __mips64 | 53 | #ifdef __mips64 |
57 | static inline __attribute__((nomips16)) __attribute_const__ | 54 | static inline __attribute_const__ __u64 __arch_swab64(__u64 x) |
58 | __u64 __arch_swab64(__u64 x) | ||
59 | { | 55 | { |
60 | __asm__( | 56 | __asm__( |
61 | " .set push \n" | 57 | " .set push \n" |
62 | " .set arch=mips64r2 \n" | 58 | " .set arch=mips64r2 \n" |
63 | " .set nomips16 \n" | ||
64 | " dsbh %0, %1 \n" | 59 | " dsbh %0, %1 \n" |
65 | " dshd %0, %0 \n" | 60 | " dshd %0, %0 \n" |
66 | " .set pop \n" | 61 | " .set pop \n" |
@@ -71,5 +66,5 @@ static inline __attribute__((nomips16)) __attribute_const__ | |||
71 | } | 66 | } |
72 | #define __arch_swab64 __arch_swab64 | 67 | #define __arch_swab64 __arch_swab64 |
73 | #endif /* __mips64 */ | 68 | #endif /* __mips64 */ |
74 | #endif /* MIPS R2 or newer or Loongson 3A */ | 69 | #endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */ |
75 | #endif /* _ASM_SWAB_H */ | 70 | #endif /* _ASM_SWAB_H */ |
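The wsbh/rotr pair used above reverses all four bytes of a word: wsbh swaps the bytes within each 16-bit half and the rotate by 16 then swaps the halves. A plain C sketch of the same transformation (the shift-and-mask fallback added later in this section for pre-R2 CPUs computes the identical result):

#include <stdint.h>
#include <stdio.h>

static uint32_t wsbh(uint32_t x)        /* swap bytes within each halfword */
{
        return ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
}

static uint32_t rotr(uint32_t x, unsigned int n)
{
        return (x >> n) | (x << (32 - n));
}

int main(void)
{
        uint32_t x = 0x11223344u;

        /* prints 11223344 -> 44332211 */
        printf("%08x -> %08x\n", (unsigned int)x, (unsigned int)rotr(wsbh(x), 16));
        return 0;
}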
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index c03088f9f514..cfabadb135d9 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h | |||
@@ -377,16 +377,18 @@ | |||
377 | #define __NR_memfd_create (__NR_Linux + 354) | 377 | #define __NR_memfd_create (__NR_Linux + 354) |
378 | #define __NR_bpf (__NR_Linux + 355) | 378 | #define __NR_bpf (__NR_Linux + 355) |
379 | #define __NR_execveat (__NR_Linux + 356) | 379 | #define __NR_execveat (__NR_Linux + 356) |
380 | #define __NR_userfaultfd (__NR_Linux + 357) | ||
381 | #define __NR_membarrier (__NR_Linux + 358) | ||
380 | 382 | ||
381 | /* | 383 | /* |
382 | * Offset of the last Linux o32 flavoured syscall | 384 | * Offset of the last Linux o32 flavoured syscall |
383 | */ | 385 | */ |
384 | #define __NR_Linux_syscalls 356 | 386 | #define __NR_Linux_syscalls 358 |
385 | 387 | ||
386 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 388 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
387 | 389 | ||
388 | #define __NR_O32_Linux 4000 | 390 | #define __NR_O32_Linux 4000 |
389 | #define __NR_O32_Linux_syscalls 356 | 391 | #define __NR_O32_Linux_syscalls 358 |
390 | 392 | ||
391 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 393 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
392 | 394 | ||
@@ -711,16 +713,18 @@ | |||
711 | #define __NR_memfd_create (__NR_Linux + 314) | 713 | #define __NR_memfd_create (__NR_Linux + 314) |
712 | #define __NR_bpf (__NR_Linux + 315) | 714 | #define __NR_bpf (__NR_Linux + 315) |
713 | #define __NR_execveat (__NR_Linux + 316) | 715 | #define __NR_execveat (__NR_Linux + 316) |
716 | #define __NR_userfaultfd (__NR_Linux + 317) | ||
717 | #define __NR_membarrier (__NR_Linux + 318) | ||
714 | 718 | ||
715 | /* | 719 | /* |
716 | * Offset of the last Linux 64-bit flavoured syscall | 720 | * Offset of the last Linux 64-bit flavoured syscall |
717 | */ | 721 | */ |
718 | #define __NR_Linux_syscalls 316 | 722 | #define __NR_Linux_syscalls 318 |
719 | 723 | ||
720 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 724 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
721 | 725 | ||
722 | #define __NR_64_Linux 5000 | 726 | #define __NR_64_Linux 5000 |
723 | #define __NR_64_Linux_syscalls 316 | 727 | #define __NR_64_Linux_syscalls 318 |
724 | 728 | ||
725 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 729 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
726 | 730 | ||
@@ -1049,15 +1053,17 @@ | |||
1049 | #define __NR_memfd_create (__NR_Linux + 318) | 1053 | #define __NR_memfd_create (__NR_Linux + 318) |
1050 | #define __NR_bpf (__NR_Linux + 319) | 1054 | #define __NR_bpf (__NR_Linux + 319) |
1051 | #define __NR_execveat (__NR_Linux + 320) | 1055 | #define __NR_execveat (__NR_Linux + 320) |
1056 | #define __NR_userfaultfd (__NR_Linux + 321) | ||
1057 | #define __NR_membarrier (__NR_Linux + 322) | ||
1052 | 1058 | ||
1053 | /* | 1059 | /* |
1054 | * Offset of the last N32 flavoured syscall | 1060 | * Offset of the last N32 flavoured syscall |
1055 | */ | 1061 | */ |
1056 | #define __NR_Linux_syscalls 320 | 1062 | #define __NR_Linux_syscalls 322 |
1057 | 1063 | ||
1058 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 1064 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
1059 | 1065 | ||
1060 | #define __NR_N32_Linux 6000 | 1066 | #define __NR_N32_Linux 6000 |
1061 | #define __NR_N32_Linux_syscalls 320 | 1067 | #define __NR_N32_Linux_syscalls 322 |
1062 | 1068 | ||
1063 | #endif /* _UAPI_ASM_UNISTD_H */ | 1069 | #endif /* _UAPI_ASM_UNISTD_H */ |
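Each ABI gets its new syscall numbers relative to its own base, so userspace that bypasses libc needs the absolute values. A quick sketch computing them from the bases and offsets in the hunks above:

#include <stdio.h>

int main(void)
{
        printf("o32: userfaultfd=%d membarrier=%d\n", 4000 + 357, 4000 + 358);
        printf("n64: userfaultfd=%d membarrier=%d\n", 5000 + 317, 5000 + 318);
        printf("n32: userfaultfd=%d membarrier=%d\n", 6000 + 321, 6000 + 322);
        return 0;
}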
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index 4e62bf85d0b0..459cb017306c 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/power/jz4740-battery.h> | 26 | #include <linux/power/jz4740-battery.h> |
27 | #include <linux/power/gpio-charger.h> | 27 | #include <linux/power/gpio-charger.h> |
28 | 28 | ||
29 | #include <asm/mach-jz4740/gpio.h> | ||
29 | #include <asm/mach-jz4740/jz4740_fb.h> | 30 | #include <asm/mach-jz4740/jz4740_fb.h> |
30 | #include <asm/mach-jz4740/jz4740_mmc.h> | 31 | #include <asm/mach-jz4740/jz4740_mmc.h> |
31 | #include <asm/mach-jz4740/jz4740_nand.h> | 32 | #include <asm/mach-jz4740/jz4740_nand.h> |
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index 6cd69fdaa1c5..8c6d76c9b2d6 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
29 | 29 | ||
30 | #include <asm/mach-jz4740/base.h> | 30 | #include <asm/mach-jz4740/base.h> |
31 | #include <asm/mach-jz4740/gpio.h> | ||
31 | 32 | ||
32 | #define JZ4740_GPIO_BASE_A (32*0) | 33 | #define JZ4740_GPIO_BASE_A (32*0) |
33 | #define JZ4740_GPIO_BASE_B (32*1) | 34 | #define JZ4740_GPIO_BASE_B (32*1) |
@@ -291,7 +292,7 @@ static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int i | |||
291 | writel(mask, reg); | 292 | writel(mask, reg); |
292 | } | 293 | } |
293 | 294 | ||
294 | static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc) | 295 | static void jz_gpio_irq_demux_handler(struct irq_desc *desc) |
295 | { | 296 | { |
296 | uint32_t flag; | 297 | uint32_t flag; |
297 | unsigned int gpio_irq; | 298 | unsigned int gpio_irq; |
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 9f71c06aebf6..209ded16806b 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S | |||
@@ -39,6 +39,7 @@ | |||
39 | mfc0 \dest, CP0_CONFIG, 3 | 39 | mfc0 \dest, CP0_CONFIG, 3 |
40 | andi \dest, \dest, MIPS_CONF3_MT | 40 | andi \dest, \dest, MIPS_CONF3_MT |
41 | beqz \dest, \nomt | 41 | beqz \dest, \nomt |
42 | nop | ||
42 | .endm | 43 | .endm |
43 | 44 | ||
44 | .section .text.cps-vec | 45 | .section .text.cps-vec |
@@ -223,10 +224,9 @@ LEAF(excep_ejtag) | |||
223 | END(excep_ejtag) | 224 | END(excep_ejtag) |
224 | 225 | ||
225 | LEAF(mips_cps_core_init) | 226 | LEAF(mips_cps_core_init) |
226 | #ifdef CONFIG_MIPS_MT | 227 | #ifdef CONFIG_MIPS_MT_SMP |
227 | /* Check that the core implements the MT ASE */ | 228 | /* Check that the core implements the MT ASE */ |
228 | has_mt t0, 3f | 229 | has_mt t0, 3f |
229 | nop | ||
230 | 230 | ||
231 | .set push | 231 | .set push |
232 | .set mips64r2 | 232 | .set mips64r2 |
@@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes) | |||
310 | PTR_ADDU t0, t0, t1 | 310 | PTR_ADDU t0, t0, t1 |
311 | 311 | ||
312 | /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ | 312 | /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ |
313 | li t9, 0 | ||
314 | #ifdef CONFIG_MIPS_MT_SMP | ||
313 | has_mt ta2, 1f | 315 | has_mt ta2, 1f |
314 | li t9, 0 | ||
315 | 316 | ||
316 | /* Find the number of VPEs present in the core */ | 317 | /* Find the number of VPEs present in the core */ |
317 | mfc0 t1, CP0_MVPCONF0 | 318 | mfc0 t1, CP0_MVPCONF0 |
@@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes) | |||
330 | /* Retrieve the VPE ID from EBase.CPUNum */ | 331 | /* Retrieve the VPE ID from EBase.CPUNum */ |
331 | mfc0 t9, $15, 1 | 332 | mfc0 t9, $15, 1 |
332 | and t9, t9, t1 | 333 | and t9, t9, t1 |
334 | #endif | ||
333 | 335 | ||
334 | 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ | 336 | 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ |
335 | li t1, VPEBOOTCFG_SIZE | 337 | li t1, VPEBOOTCFG_SIZE |
@@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes) | |||
337 | PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) | 339 | PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) |
338 | PTR_ADDU v0, v0, ta3 | 340 | PTR_ADDU v0, v0, ta3 |
339 | 341 | ||
340 | #ifdef CONFIG_MIPS_MT | 342 | #ifdef CONFIG_MIPS_MT_SMP |
341 | 343 | ||
342 | /* If the core doesn't support MT then return */ | 344 | /* If the core doesn't support MT then return */ |
343 | bnez ta2, 1f | 345 | bnez ta2, 1f |
@@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes) | |||
451 | 453 | ||
452 | 2: .set pop | 454 | 2: .set pop |
453 | 455 | ||
454 | #endif /* CONFIG_MIPS_MT */ | 456 | #endif /* CONFIG_MIPS_MT_SMP */ |
455 | 457 | ||
456 | /* Return */ | 458 | /* Return */ |
457 | jr ra | 459 | jr ra |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 571a8e6ea5bd..09a51d091941 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -410,16 +410,18 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable) | |||
410 | static inline unsigned int decode_config0(struct cpuinfo_mips *c) | 410 | static inline unsigned int decode_config0(struct cpuinfo_mips *c) |
411 | { | 411 | { |
412 | unsigned int config0; | 412 | unsigned int config0; |
413 | int isa; | 413 | int isa, mt; |
414 | 414 | ||
415 | config0 = read_c0_config(); | 415 | config0 = read_c0_config(); |
416 | 416 | ||
417 | /* | 417 | /* |
418 | * Look for Standard TLB or Dual VTLB and FTLB | 418 | * Look for Standard TLB or Dual VTLB and FTLB |
419 | */ | 419 | */ |
420 | if ((((config0 & MIPS_CONF_MT) >> 7) == 1) || | 420 | mt = config0 & MIPS_CONF_MT; |
421 | (((config0 & MIPS_CONF_MT) >> 7) == 4)) | 421 | if (mt == MIPS_CONF_MT_TLB) |
422 | c->options |= MIPS_CPU_TLB; | 422 | c->options |= MIPS_CPU_TLB; |
423 | else if (mt == MIPS_CONF_MT_FTLB) | ||
424 | c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB; | ||
423 | 425 | ||
424 | isa = (config0 & MIPS_CONF_AT) >> 13; | 426 | isa = (config0 & MIPS_CONF_AT) >> 13; |
425 | switch (isa) { | 427 | switch (isa) { |
@@ -559,15 +561,18 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) | |||
559 | if (cpu_has_tlb) { | 561 | if (cpu_has_tlb) { |
560 | if (((config4 & MIPS_CONF4_IE) >> 29) == 2) | 562 | if (((config4 & MIPS_CONF4_IE) >> 29) == 2) |
561 | c->options |= MIPS_CPU_TLBINV; | 563 | c->options |= MIPS_CPU_TLBINV; |
564 | |||
562 | /* | 565 | /* |
563 | * This is a bit ugly. R6 has dropped that field from | 566 | * R6 has dropped the MMUExtDef field from config4. |
564 | * config4 and the only valid configuration is VTLB+FTLB so | 567 | * On R6 the fields always describe the FTLB, and only if it is |
565 | * set a good value for mmuextdef for that case. | 568 | * present according to Config.MT. |
566 | */ | 569 | */ |
567 | if (cpu_has_mips_r6) | 570 | if (!cpu_has_mips_r6) |
571 | mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; | ||
572 | else if (cpu_has_ftlb) | ||
568 | mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; | 573 | mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; |
569 | else | 574 | else |
570 | mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; | 575 | mmuextdef = 0; |
571 | 576 | ||
572 | switch (mmuextdef) { | 577 | switch (mmuextdef) { |
573 | case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: | 578 | case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: |
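The decode_config0() change keys off the architecturally defined Config.MT values instead of magic shifts: 1 means a standard VTLB only, 4 means a dual VTLB+FTLB and now also sets the new MIPS_CPU_FTLB option. A standalone sketch of the decode, using a made-up Config0 value:

#include <stdio.h>

#define CONF_MT         (7u << 7)       /* Config.MT field, bits 9:7 */
#define CONF_MT_TLB     (1u << 7)
#define CONF_MT_FTLB    (4u << 7)

int main(void)
{
        unsigned int config0 = 0x80000200;      /* example value, MT field = 4 */
        unsigned int mt = config0 & CONF_MT;

        if (mt == CONF_MT_TLB)
                printf("VTLB only\n");
        else if (mt == CONF_MT_FTLB)
                printf("VTLB + FTLB\n");
        else
                printf("no standard TLB\n");
        return 0;
}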
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 423ae83af1fb..3375745b9198 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S | |||
@@ -18,7 +18,7 @@ | |||
18 | .set pop | 18 | .set pop |
19 | /* | 19 | /* |
20 | * task_struct *resume(task_struct *prev, task_struct *next, | 20 | * task_struct *resume(task_struct *prev, task_struct *next, |
21 | * struct thread_info *next_ti, int usedfpu) | 21 | * struct thread_info *next_ti) |
22 | */ | 22 | */ |
23 | .align 7 | 23 | .align 7 |
24 | LEAF(resume) | 24 | LEAF(resume) |
@@ -28,30 +28,6 @@ | |||
28 | cpu_save_nonscratch a0 | 28 | cpu_save_nonscratch a0 |
29 | LONG_S ra, THREAD_REG31(a0) | 29 | LONG_S ra, THREAD_REG31(a0) |
30 | 30 | ||
31 | /* | ||
32 | * check if we need to save FPU registers | ||
33 | */ | ||
34 | .set push | ||
35 | .set noreorder | ||
36 | beqz a3, 1f | ||
37 | PTR_L t3, TASK_THREAD_INFO(a0) | ||
38 | .set pop | ||
39 | |||
40 | /* | ||
41 | * clear saved user stack CU1 bit | ||
42 | */ | ||
43 | LONG_L t0, ST_OFF(t3) | ||
44 | li t1, ~ST0_CU1 | ||
45 | and t0, t0, t1 | ||
46 | LONG_S t0, ST_OFF(t3) | ||
47 | |||
48 | .set push | ||
49 | .set arch=mips64r2 | ||
50 | fpu_save_double a0 t0 t1 # c0_status passed in t0 | ||
51 | # clobbers t1 | ||
52 | .set pop | ||
53 | 1: | ||
54 | |||
55 | #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 | 31 | #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 |
56 | /* Check if we need to store CVMSEG state */ | 32 | /* Check if we need to store CVMSEG state */ |
57 | dmfc0 t0, $11,7 /* CvmMemCtl */ | 33 | dmfc0 t0, $11,7 /* CvmMemCtl */ |
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 5087a4b72e6b..ac27ef7d4d0e 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S | |||
@@ -31,18 +31,8 @@ | |||
31 | #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) | 31 | #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * FPU context is saved iff the process has used it's FPU in the current | ||
35 | * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user | ||
36 | * space STATUS register should be 0, so that a process *always* starts its | ||
37 | * userland with FPU disabled after each context switch. | ||
38 | * | ||
39 | * FPU will be enabled as soon as the process accesses FPU again, through | ||
40 | * do_cpu() trap. | ||
41 | */ | ||
42 | |||
43 | /* | ||
44 | * task_struct *resume(task_struct *prev, task_struct *next, | 34 | * task_struct *resume(task_struct *prev, task_struct *next, |
45 | * struct thread_info *next_ti, int usedfpu) | 35 | * struct thread_info *next_ti) |
46 | */ | 36 | */ |
47 | LEAF(resume) | 37 | LEAF(resume) |
48 | mfc0 t1, CP0_STATUS | 38 | mfc0 t1, CP0_STATUS |
@@ -50,22 +40,6 @@ LEAF(resume) | |||
50 | cpu_save_nonscratch a0 | 40 | cpu_save_nonscratch a0 |
51 | sw ra, THREAD_REG31(a0) | 41 | sw ra, THREAD_REG31(a0) |
52 | 42 | ||
53 | beqz a3, 1f | ||
54 | |||
55 | PTR_L t3, TASK_THREAD_INFO(a0) | ||
56 | |||
57 | /* | ||
58 | * clear saved user stack CU1 bit | ||
59 | */ | ||
60 | lw t0, ST_OFF(t3) | ||
61 | li t1, ~ST0_CU1 | ||
62 | and t0, t0, t1 | ||
63 | sw t0, ST_OFF(t3) | ||
64 | |||
65 | fpu_save_single a0, t0 # clobbers t0 | ||
66 | |||
67 | 1: | ||
68 | |||
69 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) | 43 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) |
70 | PTR_LA t8, __stack_chk_guard | 44 | PTR_LA t8, __stack_chk_guard |
71 | LONG_L t9, TASK_STACK_CANARY(a1) | 45 | LONG_L t9, TASK_STACK_CANARY(a1) |
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 4cc13508d967..65a74e4f0f45 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp) | |||
36 | lw t1, PT_EPC(sp) # skip syscall on return | 36 | lw t1, PT_EPC(sp) # skip syscall on return |
37 | 37 | ||
38 | subu v0, v0, __NR_O32_Linux # check syscall number | 38 | subu v0, v0, __NR_O32_Linux # check syscall number |
39 | sltiu t0, v0, __NR_O32_Linux_syscalls + 1 | ||
40 | addiu t1, 4 # skip to next instruction | 39 | addiu t1, 4 # skip to next instruction |
41 | sw t1, PT_EPC(sp) | 40 | sw t1, PT_EPC(sp) |
42 | beqz t0, illegal_syscall | ||
43 | |||
44 | sll t0, v0, 2 | ||
45 | la t1, sys_call_table | ||
46 | addu t1, t0 | ||
47 | lw t2, (t1) # syscall routine | ||
48 | beqz t2, illegal_syscall | ||
49 | 41 | ||
50 | sw a3, PT_R26(sp) # save a3 for syscall restarting | 42 | sw a3, PT_R26(sp) # save a3 for syscall restarting |
51 | 43 | ||
@@ -96,6 +88,16 @@ loads_done: | |||
96 | li t1, _TIF_WORK_SYSCALL_ENTRY | 88 | li t1, _TIF_WORK_SYSCALL_ENTRY |
97 | and t0, t1 | 89 | and t0, t1 |
98 | bnez t0, syscall_trace_entry # -> yes | 90 | bnez t0, syscall_trace_entry # -> yes |
91 | syscall_common: | ||
92 | sltiu t0, v0, __NR_O32_Linux_syscalls + 1 | ||
93 | beqz t0, illegal_syscall | ||
94 | |||
95 | sll t0, v0, 2 | ||
96 | la t1, sys_call_table | ||
97 | addu t1, t0 | ||
98 | lw t2, (t1) # syscall routine | ||
99 | |||
100 | beqz t2, illegal_syscall | ||
99 | 101 | ||
100 | jalr t2 # Do The Real Thing (TM) | 102 | jalr t2 # Do The Real Thing (TM) |
101 | 103 | ||
@@ -116,7 +118,7 @@ o32_syscall_exit: | |||
116 | 118 | ||
117 | syscall_trace_entry: | 119 | syscall_trace_entry: |
118 | SAVE_STATIC | 120 | SAVE_STATIC |
119 | move s0, t2 | 121 | move s0, v0 |
120 | move a0, sp | 122 | move a0, sp |
121 | 123 | ||
122 | /* | 124 | /* |
@@ -129,27 +131,18 @@ syscall_trace_entry: | |||
129 | 131 | ||
130 | 1: jal syscall_trace_enter | 132 | 1: jal syscall_trace_enter |
131 | 133 | ||
132 | bltz v0, 2f # seccomp failed? Skip syscall | 134 | bltz v0, 1f # seccomp failed? Skip syscall |
135 | |||
136 | move v0, s0 # restore syscall | ||
133 | 137 | ||
134 | move t0, s0 | ||
135 | RESTORE_STATIC | 138 | RESTORE_STATIC |
136 | lw a0, PT_R4(sp) # Restore argument registers | 139 | lw a0, PT_R4(sp) # Restore argument registers |
137 | lw a1, PT_R5(sp) | 140 | lw a1, PT_R5(sp) |
138 | lw a2, PT_R6(sp) | 141 | lw a2, PT_R6(sp) |
139 | lw a3, PT_R7(sp) | 142 | lw a3, PT_R7(sp) |
140 | jalr t0 | 143 | j syscall_common |
141 | |||
142 | li t0, -EMAXERRNO - 1 # error? | ||
143 | sltu t0, t0, v0 | ||
144 | sw t0, PT_R7(sp) # set error flag | ||
145 | beqz t0, 1f | ||
146 | |||
147 | lw t1, PT_R2(sp) # syscall number | ||
148 | negu v0 # error | ||
149 | sw t1, PT_R0(sp) # save it for syscall restarting | ||
150 | 1: sw v0, PT_R2(sp) # result | ||
151 | 144 | ||
152 | 2: j syscall_exit | 145 | 1: j syscall_exit |
153 | 146 | ||
154 | /* ------------------------------------------------------------------------ */ | 147 | /* ------------------------------------------------------------------------ */ |
155 | 148 | ||
@@ -599,3 +592,5 @@ EXPORT(sys_call_table) | |||
599 | PTR sys_memfd_create | 592 | PTR sys_memfd_create |
600 | PTR sys_bpf /* 4355 */ | 593 | PTR sys_bpf /* 4355 */ |
601 | PTR sys_execveat | 594 | PTR sys_execveat |
595 | PTR sys_userfaultfd | ||
596 | PTR sys_membarrier | ||
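The o32 entry path (and, in the following files, the n64/n32 paths) is reworked so the traced and untraced cases share one bounds check and table lookup at syscall_common; the trace path just restores the syscall number from s0 and jumps back. A rough C sketch of the shared dispatch, with illustrative names and -ENOSYS standing in for the illegal_syscall exit:

#include <errno.h>

typedef long (*syscall_fn)(long, long, long, long);

long dispatch_syscall(unsigned int nr, const syscall_fn *table,
                      unsigned int nr_syscalls,
                      long a0, long a1, long a2, long a3)
{
        if (nr > nr_syscalls)           /* sltiu t0, v0, nr_syscalls + 1; beqz */
                return -ENOSYS;
        if (!table[nr])                 /* hole in the table */
                return -ENOSYS;
        return table[nr](a0, a1, a2, a3);
}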
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a6f6b762c47a..e732981cf99f 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp) | |||
39 | .set at | 39 | .set at |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | dsubu t0, v0, __NR_64_Linux # check syscall number | ||
43 | sltiu t0, t0, __NR_64_Linux_syscalls + 1 | ||
44 | #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) | 42 | #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) |
45 | ld t1, PT_EPC(sp) # skip syscall on return | 43 | ld t1, PT_EPC(sp) # skip syscall on return |
46 | daddiu t1, 4 # skip to next instruction | 44 | daddiu t1, 4 # skip to next instruction |
47 | sd t1, PT_EPC(sp) | 45 | sd t1, PT_EPC(sp) |
48 | #endif | 46 | #endif |
49 | beqz t0, illegal_syscall | ||
50 | |||
51 | dsll t0, v0, 3 # offset into table | ||
52 | ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0) | ||
53 | # syscall routine | ||
54 | 47 | ||
55 | sd a3, PT_R26(sp) # save a3 for syscall restarting | 48 | sd a3, PT_R26(sp) # save a3 for syscall restarting |
56 | 49 | ||
@@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp) | |||
59 | and t0, t1, t0 | 52 | and t0, t1, t0 |
60 | bnez t0, syscall_trace_entry | 53 | bnez t0, syscall_trace_entry |
61 | 54 | ||
55 | syscall_common: | ||
56 | dsubu t2, v0, __NR_64_Linux | ||
57 | sltiu t0, t2, __NR_64_Linux_syscalls + 1 | ||
58 | beqz t0, illegal_syscall | ||
59 | |||
60 | dsll t0, t2, 3 # offset into table | ||
61 | dla t2, sys_call_table | ||
62 | daddu t0, t2, t0 | ||
63 | ld t2, (t0) # syscall routine | ||
64 | beqz t2, illegal_syscall | ||
65 | |||
62 | jalr t2 # Do The Real Thing (TM) | 66 | jalr t2 # Do The Real Thing (TM) |
63 | 67 | ||
64 | li t0, -EMAXERRNO - 1 # error? | 68 | li t0, -EMAXERRNO - 1 # error? |
@@ -78,14 +82,14 @@ n64_syscall_exit: | |||
78 | 82 | ||
79 | syscall_trace_entry: | 83 | syscall_trace_entry: |
80 | SAVE_STATIC | 84 | SAVE_STATIC |
81 | move s0, t2 | 85 | move s0, v0 |
82 | move a0, sp | 86 | move a0, sp |
83 | move a1, v0 | 87 | move a1, v0 |
84 | jal syscall_trace_enter | 88 | jal syscall_trace_enter |
85 | 89 | ||
86 | bltz v0, 2f # seccomp failed? Skip syscall | 90 | bltz v0, 1f # seccomp failed? Skip syscall |
87 | 91 | ||
88 | move t0, s0 | 92 | move v0, s0 |
89 | RESTORE_STATIC | 93 | RESTORE_STATIC |
90 | ld a0, PT_R4(sp) # Restore argument registers | 94 | ld a0, PT_R4(sp) # Restore argument registers |
91 | ld a1, PT_R5(sp) | 95 | ld a1, PT_R5(sp) |
@@ -93,19 +97,9 @@ syscall_trace_entry: | |||
93 | ld a3, PT_R7(sp) | 97 | ld a3, PT_R7(sp) |
94 | ld a4, PT_R8(sp) | 98 | ld a4, PT_R8(sp) |
95 | ld a5, PT_R9(sp) | 99 | ld a5, PT_R9(sp) |
96 | jalr t0 | 100 | j syscall_common |
97 | |||
98 | li t0, -EMAXERRNO - 1 # error? | ||
99 | sltu t0, t0, v0 | ||
100 | sd t0, PT_R7(sp) # set error flag | ||
101 | beqz t0, 1f | ||
102 | |||
103 | ld t1, PT_R2(sp) # syscall number | ||
104 | dnegu v0 # error | ||
105 | sd t1, PT_R0(sp) # save it for syscall restarting | ||
106 | 1: sd v0, PT_R2(sp) # result | ||
107 | 101 | ||
108 | 2: j syscall_exit | 102 | 1: j syscall_exit |
109 | 103 | ||
110 | illegal_syscall: | 104 | illegal_syscall: |
111 | /* This also isn't a 64-bit syscall, throw an error. */ | 105 | /* This also isn't a 64-bit syscall, throw an error. */ |
@@ -436,4 +430,6 @@ EXPORT(sys_call_table) | |||
436 | PTR sys_memfd_create | 430 | PTR sys_memfd_create |
437 | PTR sys_bpf /* 5315 */ | 431 | PTR sys_bpf /* 5315 */ |
438 | PTR sys_execveat | 432 | PTR sys_execveat |
433 | PTR sys_userfaultfd | ||
434 | PTR sys_membarrier | ||
439 | .size sys_call_table,.-sys_call_table | 435 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 4b2010654c46..c79484397584 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) | |||
52 | and t0, t1, t0 | 52 | and t0, t1, t0 |
53 | bnez t0, n32_syscall_trace_entry | 53 | bnez t0, n32_syscall_trace_entry |
54 | 54 | ||
55 | syscall_common: | ||
55 | jalr t2 # Do The Real Thing (TM) | 56 | jalr t2 # Do The Real Thing (TM) |
56 | 57 | ||
57 | li t0, -EMAXERRNO - 1 # error? | 58 | li t0, -EMAXERRNO - 1 # error? |
@@ -75,9 +76,9 @@ n32_syscall_trace_entry: | |||
75 | move a1, v0 | 76 | move a1, v0 |
76 | jal syscall_trace_enter | 77 | jal syscall_trace_enter |
77 | 78 | ||
78 | bltz v0, 2f # seccomp failed? Skip syscall | 79 | bltz v0, 1f # seccomp failed? Skip syscall |
79 | 80 | ||
80 | move t0, s0 | 81 | move t2, s0 |
81 | RESTORE_STATIC | 82 | RESTORE_STATIC |
82 | ld a0, PT_R4(sp) # Restore argument registers | 83 | ld a0, PT_R4(sp) # Restore argument registers |
83 | ld a1, PT_R5(sp) | 84 | ld a1, PT_R5(sp) |
@@ -85,19 +86,9 @@ n32_syscall_trace_entry: | |||
85 | ld a3, PT_R7(sp) | 86 | ld a3, PT_R7(sp) |
86 | ld a4, PT_R8(sp) | 87 | ld a4, PT_R8(sp) |
87 | ld a5, PT_R9(sp) | 88 | ld a5, PT_R9(sp) |
88 | jalr t0 | 89 | j syscall_common |
89 | 90 | ||
90 | li t0, -EMAXERRNO - 1 # error? | 91 | 1: j syscall_exit |
91 | sltu t0, t0, v0 | ||
92 | sd t0, PT_R7(sp) # set error flag | ||
93 | beqz t0, 1f | ||
94 | |||
95 | ld t1, PT_R2(sp) # syscall number | ||
96 | dnegu v0 # error | ||
97 | sd t1, PT_R0(sp) # save it for syscall restarting | ||
98 | 1: sd v0, PT_R2(sp) # result | ||
99 | |||
100 | 2: j syscall_exit | ||
101 | 92 | ||
102 | not_n32_scall: | 93 | not_n32_scall: |
103 | /* This is not an n32 compatibility syscall, pass it on to | 94 | /* This is not an n32 compatibility syscall, pass it on to |
@@ -429,4 +420,6 @@ EXPORT(sysn32_call_table) | |||
429 | PTR sys_memfd_create | 420 | PTR sys_memfd_create |
430 | PTR sys_bpf | 421 | PTR sys_bpf |
431 | PTR compat_sys_execveat /* 6320 */ | 422 | PTR compat_sys_execveat /* 6320 */ |
423 | PTR sys_userfaultfd | ||
424 | PTR sys_membarrier | ||
432 | .size sysn32_call_table,.-sysn32_call_table | 425 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f543ff4feef9..6369cfd390c6 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -87,6 +87,7 @@ loads_done: | |||
87 | and t0, t1, t0 | 87 | and t0, t1, t0 |
88 | bnez t0, trace_a_syscall | 88 | bnez t0, trace_a_syscall |
89 | 89 | ||
90 | syscall_common: | ||
90 | jalr t2 # Do The Real Thing (TM) | 91 | jalr t2 # Do The Real Thing (TM) |
91 | 92 | ||
92 | li t0, -EMAXERRNO - 1 # error? | 93 | li t0, -EMAXERRNO - 1 # error? |
@@ -130,9 +131,9 @@ trace_a_syscall: | |||
130 | 131 | ||
131 | 1: jal syscall_trace_enter | 132 | 1: jal syscall_trace_enter |
132 | 133 | ||
133 | bltz v0, 2f # seccomp failed? Skip syscall | 134 | bltz v0, 1f # seccomp failed? Skip syscall |
134 | 135 | ||
135 | move t0, s0 | 136 | move t2, s0 |
136 | RESTORE_STATIC | 137 | RESTORE_STATIC |
137 | ld a0, PT_R4(sp) # Restore argument registers | 138 | ld a0, PT_R4(sp) # Restore argument registers |
138 | ld a1, PT_R5(sp) | 139 | ld a1, PT_R5(sp) |
@@ -142,19 +143,9 @@ trace_a_syscall: | |||
142 | ld a5, PT_R9(sp) | 143 | ld a5, PT_R9(sp) |
143 | ld a6, PT_R10(sp) | 144 | ld a6, PT_R10(sp) |
144 | ld a7, PT_R11(sp) # For indirect syscalls | 145 | ld a7, PT_R11(sp) # For indirect syscalls |
145 | jalr t0 | 146 | j syscall_common |
146 | 147 | ||
147 | li t0, -EMAXERRNO - 1 # error? | 148 | 1: j syscall_exit |
148 | sltu t0, t0, v0 | ||
149 | sd t0, PT_R7(sp) # set error flag | ||
150 | beqz t0, 1f | ||
151 | |||
152 | ld t1, PT_R2(sp) # syscall number | ||
153 | dnegu v0 # error | ||
154 | sd t1, PT_R0(sp) # save it for syscall restarting | ||
155 | 1: sd v0, PT_R2(sp) # result | ||
156 | |||
157 | 2: j syscall_exit | ||
158 | 149 | ||
159 | /* ------------------------------------------------------------------------ */ | 150 | /* ------------------------------------------------------------------------ */ |
160 | 151 | ||
@@ -584,4 +575,6 @@ EXPORT(sys32_call_table) | |||
584 | PTR sys_memfd_create | 575 | PTR sys_memfd_create |
585 | PTR sys_bpf /* 4355 */ | 576 | PTR sys_bpf /* 4355 */ |
586 | PTR compat_sys_execveat | 577 | PTR compat_sys_execveat |
578 | PTR sys_userfaultfd | ||
579 | PTR sys_membarrier | ||
587 | .size sys32_call_table,.-sys32_call_table | 580 | .size sys32_call_table,.-sys32_call_table |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 35b8316002f8..479515109e5b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
@@ -338,7 +338,7 @@ static void __init bootmem_init(void) | |||
338 | if (end <= reserved_end) | 338 | if (end <= reserved_end) |
339 | continue; | 339 | continue; |
340 | #ifdef CONFIG_BLK_DEV_INITRD | 340 | #ifdef CONFIG_BLK_DEV_INITRD |
341 | /* mapstart should be after initrd_end */ | 341 | /* Skip zones before initrd and initrd itself */ |
342 | if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) | 342 | if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) |
343 | continue; | 343 | continue; |
344 | #endif | 344 | #endif |
@@ -371,6 +371,14 @@ static void __init bootmem_init(void) | |||
371 | max_low_pfn = PFN_DOWN(HIGHMEM_START); | 371 | max_low_pfn = PFN_DOWN(HIGHMEM_START); |
372 | } | 372 | } |
373 | 373 | ||
374 | #ifdef CONFIG_BLK_DEV_INITRD | ||
375 | /* | ||
376 | * mapstart should be after initrd_end | ||
377 | */ | ||
378 | if (initrd_end) | ||
379 | mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); | ||
380 | #endif | ||
381 | |||
374 | /* | 382 | /* |
375 | * Initialize the boot-time allocator with low memory only. | 383 | * Initialize the boot-time allocator with low memory only. |
376 | */ | 384 | */ |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index a31896c33716..bd4385a8e6e8 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <asm/mmu_context.h> | 42 | #include <asm/mmu_context.h> |
43 | #include <asm/time.h> | 43 | #include <asm/time.h> |
44 | #include <asm/setup.h> | 44 | #include <asm/setup.h> |
45 | #include <asm/maar.h> | ||
45 | 46 | ||
46 | cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ | 47 | cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ |
47 | 48 | ||
@@ -157,6 +158,7 @@ asmlinkage void start_secondary(void) | |||
157 | mips_clockevent_init(); | 158 | mips_clockevent_init(); |
158 | mp_ops->init_secondary(); | 159 | mp_ops->init_secondary(); |
159 | cpu_report(); | 160 | cpu_report(); |
161 | maar_init(); | ||
160 | 162 | ||
161 | /* | 163 | /* |
162 | * XXX parity protection should be folded in here when it's converted | 164 | * XXX parity protection should be folded in here when it's converted |
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index cd4c129ce743..49ff3bfc007e 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c | |||
@@ -55,6 +55,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
55 | { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, | 55 | { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, |
56 | { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, | 56 | { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, |
57 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, | 57 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, |
58 | { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU }, | ||
58 | { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, | 59 | { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, |
59 | {NULL} | 60 | {NULL} |
60 | }; | 61 | }; |
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c index f6c44dd332e2..d6d07ad56180 100644 --- a/arch/mips/loongson64/common/env.c +++ b/arch/mips/loongson64/common/env.c | |||
@@ -64,6 +64,9 @@ void __init prom_init_env(void) | |||
64 | } | 64 | } |
65 | if (memsize == 0) | 65 | if (memsize == 0) |
66 | memsize = 256; | 66 | memsize = 256; |
67 | |||
68 | loongson_sysconf.nr_uarts = 1; | ||
69 | |||
67 | pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize); | 70 | pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize); |
68 | #else | 71 | #else |
69 | struct boot_params *boot_p; | 72 | struct boot_params *boot_p; |
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index a914dc1cb6d1..d8117be729a2 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c | |||
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) | |||
100 | else | 100 | else |
101 | #endif | 101 | #endif |
102 | #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) | 102 | #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) |
103 | if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | 103 | if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8)) |
104 | dma_flag = __GFP_DMA; | 104 | dma_flag = __GFP_DMA; |
105 | else | 105 | else |
106 | #endif | 106 | #endif |
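The point of the dma-default.c change is that on configurations with a 32-bit phys_addr_t the old comparison against a fixed 64-bit mask forced ZONE_DMA even for devices that can already address all of memory. A sketch of the before/after test, with DMA_BIT_MASK written out the way the kernel defines it:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        uint64_t coherent_mask = DMA_BIT_MASK(32);      /* example 32-bit device */
        unsigned int phys_bits = 32;                    /* 32-bit phys_addr_t */

        printf("old: force GFP_DMA = %d\n", coherent_mask < DMA_BIT_MASK(64));
        printf("new: force GFP_DMA = %d\n", coherent_mask < DMA_BIT_MASK(phys_bits));
        return 0;
}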
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 66d0f49c5bec..8770e619185e 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/pgalloc.h> | 44 | #include <asm/pgalloc.h> |
45 | #include <asm/tlb.h> | 45 | #include <asm/tlb.h> |
46 | #include <asm/fixmap.h> | 46 | #include <asm/fixmap.h> |
47 | #include <asm/maar.h> | ||
47 | 48 | ||
48 | /* | 49 | /* |
49 | * We have up to 8 empty zeroed pages so we can map one of the right colour | 50 | * We have up to 8 empty zeroed pages so we can map one of the right colour |
@@ -252,6 +253,119 @@ void __init fixrange_init(unsigned long start, unsigned long end, | |||
252 | #endif | 253 | #endif |
253 | } | 254 | } |
254 | 255 | ||
256 | unsigned __weak platform_maar_init(unsigned num_pairs) | ||
257 | { | ||
258 | struct maar_config cfg[BOOT_MEM_MAP_MAX]; | ||
259 | unsigned i, num_configured, num_cfg = 0; | ||
260 | phys_addr_t skip; | ||
261 | |||
262 | for (i = 0; i < boot_mem_map.nr_map; i++) { | ||
263 | switch (boot_mem_map.map[i].type) { | ||
264 | case BOOT_MEM_RAM: | ||
265 | case BOOT_MEM_INIT_RAM: | ||
266 | break; | ||
267 | default: | ||
268 | continue; | ||
269 | } | ||
270 | |||
271 | skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); | ||
272 | |||
273 | cfg[num_cfg].lower = boot_mem_map.map[i].addr; | ||
274 | cfg[num_cfg].lower += skip; | ||
275 | |||
276 | cfg[num_cfg].upper = cfg[num_cfg].lower; | ||
277 | cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; | ||
278 | cfg[num_cfg].upper -= skip; | ||
279 | |||
280 | cfg[num_cfg].attrs = MIPS_MAAR_S; | ||
281 | num_cfg++; | ||
282 | } | ||
283 | |||
284 | num_configured = maar_config(cfg, num_cfg, num_pairs); | ||
285 | if (num_configured < num_cfg) | ||
286 | pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", | ||
287 | num_pairs, num_cfg); | ||
288 | |||
289 | return num_configured; | ||
290 | } | ||
291 | |||
292 | void maar_init(void) | ||
293 | { | ||
294 | unsigned num_maars, used, i; | ||
295 | phys_addr_t lower, upper, attr; | ||
296 | static struct { | ||
297 | struct maar_config cfgs[3]; | ||
298 | unsigned used; | ||
299 | } recorded = { { { 0 } }, 0 }; | ||
300 | |||
301 | if (!cpu_has_maar) | ||
302 | return; | ||
303 | |||
304 | /* Detect the number of MAARs */ | ||
305 | write_c0_maari(~0); | ||
306 | back_to_back_c0_hazard(); | ||
307 | num_maars = read_c0_maari() + 1; | ||
308 | |||
309 | /* MAARs should be in pairs */ | ||
310 | WARN_ON(num_maars % 2); | ||
311 | |||
312 | /* Set MAARs using values we recorded already */ | ||
313 | if (recorded.used) { | ||
314 | used = maar_config(recorded.cfgs, recorded.used, num_maars / 2); | ||
315 | BUG_ON(used != recorded.used); | ||
316 | } else { | ||
317 | /* Configure the required MAARs */ | ||
318 | used = platform_maar_init(num_maars / 2); | ||
319 | } | ||
320 | |||
321 | /* Disable any further MAARs */ | ||
322 | for (i = (used * 2); i < num_maars; i++) { | ||
323 | write_c0_maari(i); | ||
324 | back_to_back_c0_hazard(); | ||
325 | write_c0_maar(0); | ||
326 | back_to_back_c0_hazard(); | ||
327 | } | ||
328 | |||
329 | if (recorded.used) | ||
330 | return; | ||
331 | |||
332 | pr_info("MAAR configuration:\n"); | ||
333 | for (i = 0; i < num_maars; i += 2) { | ||
334 | write_c0_maari(i); | ||
335 | back_to_back_c0_hazard(); | ||
336 | upper = read_c0_maar(); | ||
337 | |||
338 | write_c0_maari(i + 1); | ||
339 | back_to_back_c0_hazard(); | ||
340 | lower = read_c0_maar(); | ||
341 | |||
342 | attr = lower & upper; | ||
343 | lower = (lower & MIPS_MAAR_ADDR) << 4; | ||
344 | upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff; | ||
345 | |||
346 | pr_info(" [%d]: ", i / 2); | ||
347 | if (!(attr & MIPS_MAAR_V)) { | ||
348 | pr_cont("disabled\n"); | ||
349 | continue; | ||
350 | } | ||
351 | |||
352 | pr_cont("%pa-%pa", &lower, &upper); | ||
353 | |||
354 | if (attr & MIPS_MAAR_S) | ||
355 | pr_cont(" speculate"); | ||
356 | |||
357 | pr_cont("\n"); | ||
358 | |||
359 | /* Record the setup for use on secondary CPUs */ | ||
360 | if (used <= ARRAY_SIZE(recorded.cfgs)) { | ||
361 | recorded.cfgs[recorded.used].lower = lower; | ||
362 | recorded.cfgs[recorded.used].upper = upper; | ||
363 | recorded.cfgs[recorded.used].attrs = attr; | ||
364 | recorded.used++; | ||
365 | } | ||
366 | } | ||
367 | } | ||
368 | |||
255 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 369 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
256 | int page_is_ram(unsigned long pagenr) | 370 | int page_is_ram(unsigned long pagenr) |
257 | { | 371 | { |
@@ -334,69 +448,6 @@ static inline void mem_init_free_highmem(void) | |||
334 | #endif | 448 | #endif |
335 | } | 449 | } |
336 | 450 | ||
337 | unsigned __weak platform_maar_init(unsigned num_pairs) | ||
338 | { | ||
339 | struct maar_config cfg[BOOT_MEM_MAP_MAX]; | ||
340 | unsigned i, num_configured, num_cfg = 0; | ||
341 | phys_addr_t skip; | ||
342 | |||
343 | for (i = 0; i < boot_mem_map.nr_map; i++) { | ||
344 | switch (boot_mem_map.map[i].type) { | ||
345 | case BOOT_MEM_RAM: | ||
346 | case BOOT_MEM_INIT_RAM: | ||
347 | break; | ||
348 | default: | ||
349 | continue; | ||
350 | } | ||
351 | |||
352 | skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); | ||
353 | |||
354 | cfg[num_cfg].lower = boot_mem_map.map[i].addr; | ||
355 | cfg[num_cfg].lower += skip; | ||
356 | |||
357 | cfg[num_cfg].upper = cfg[num_cfg].lower; | ||
358 | cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; | ||
359 | cfg[num_cfg].upper -= skip; | ||
360 | |||
361 | cfg[num_cfg].attrs = MIPS_MAAR_S; | ||
362 | num_cfg++; | ||
363 | } | ||
364 | |||
365 | num_configured = maar_config(cfg, num_cfg, num_pairs); | ||
366 | if (num_configured < num_cfg) | ||
367 | pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", | ||
368 | num_pairs, num_cfg); | ||
369 | |||
370 | return num_configured; | ||
371 | } | ||
372 | |||
373 | static void maar_init(void) | ||
374 | { | ||
375 | unsigned num_maars, used, i; | ||
376 | |||
377 | if (!cpu_has_maar) | ||
378 | return; | ||
379 | |||
380 | /* Detect the number of MAARs */ | ||
381 | write_c0_maari(~0); | ||
382 | back_to_back_c0_hazard(); | ||
383 | num_maars = read_c0_maari() + 1; | ||
384 | |||
385 | /* MAARs should be in pairs */ | ||
386 | WARN_ON(num_maars % 2); | ||
387 | |||
388 | /* Configure the required MAARs */ | ||
389 | used = platform_maar_init(num_maars / 2); | ||
390 | |||
391 | /* Disable any further MAARs */ | ||
392 | for (i = (used * 2); i < num_maars; i++) { | ||
393 | write_c0_maari(i); | ||
394 | back_to_back_c0_hazard(); | ||
395 | write_c0_maar(0); | ||
396 | back_to_back_c0_hazard(); | ||
397 | } | ||
398 | } | ||
399 | |||
400 | void __init mem_init(void) | 451 | void __init mem_init(void) |
401 | { | 452 | { |
402 | #ifdef CONFIG_HIGHMEM | 453 | #ifdef CONFIG_HIGHMEM |
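platform_maar_init() above rounds the start of each usable bootmem region up to the next 64 KiB boundary before handing it to maar_config(). A standalone sketch mirroring that arithmetic, with made-up region values:

#include <stdio.h>

int main(void)
{
        unsigned long long addr = 0x00012345ULL;        /* region start, example */
        unsigned long long size = 0x00200000ULL;        /* region size,  example */
        unsigned long long skip = 0x10000 - (addr & 0xffff);
        unsigned long long lower = addr + skip;                 /* rounded up to 64 KiB */
        unsigned long long upper = lower + size - 1 - skip;     /* == addr + size - 1 */

        printf("MAAR window: %#llx - %#llx\n", lower, upper);
        return 0;
}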
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S index e92726099be0..5d2e0c8d29c0 100644 --- a/arch/mips/net/bpf_jit_asm.S +++ b/arch/mips/net/bpf_jit_asm.S | |||
@@ -57,15 +57,28 @@ | |||
57 | 57 | ||
58 | LEAF(sk_load_word) | 58 | LEAF(sk_load_word) |
59 | is_offset_negative(word) | 59 | is_offset_negative(word) |
60 | .globl sk_load_word_positive | 60 | FEXPORT(sk_load_word_positive) |
61 | sk_load_word_positive: | ||
62 | is_offset_in_header(4, word) | 61 | is_offset_in_header(4, word) |
63 | /* Offset within header boundaries */ | 62 | /* Offset within header boundaries */ |
64 | PTR_ADDU t1, $r_skb_data, offset | 63 | PTR_ADDU t1, $r_skb_data, offset |
64 | .set reorder | ||
65 | lw $r_A, 0(t1) | 65 | lw $r_A, 0(t1) |
66 | .set noreorder | ||
66 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 67 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
68 | # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) | ||
67 | wsbh t0, $r_A | 69 | wsbh t0, $r_A |
68 | rotr $r_A, t0, 16 | 70 | rotr $r_A, t0, 16 |
71 | # else | ||
72 | sll t0, $r_A, 24 | ||
73 | srl t1, $r_A, 24 | ||
74 | srl t2, $r_A, 8 | ||
75 | or t0, t0, t1 | ||
76 | andi t2, t2, 0xff00 | ||
77 | andi t1, $r_A, 0xff00 | ||
78 | or t0, t0, t2 | ||
79 | sll t1, t1, 8 | ||
80 | or $r_A, t0, t1 | ||
81 | # endif | ||
69 | #endif | 82 | #endif |
70 | jr $r_ra | 83 | jr $r_ra |
71 | move $r_ret, zero | 84 | move $r_ret, zero |
@@ -73,15 +86,24 @@ sk_load_word_positive: | |||
73 | 86 | ||
74 | LEAF(sk_load_half) | 87 | LEAF(sk_load_half) |
75 | is_offset_negative(half) | 88 | is_offset_negative(half) |
76 | .globl sk_load_half_positive | 89 | FEXPORT(sk_load_half_positive) |
77 | sk_load_half_positive: | ||
78 | is_offset_in_header(2, half) | 90 | is_offset_in_header(2, half) |
79 | /* Offset within header boundaries */ | 91 | /* Offset within header boundaries */ |
80 | PTR_ADDU t1, $r_skb_data, offset | 92 | PTR_ADDU t1, $r_skb_data, offset |
93 | .set reorder | ||
81 | lh $r_A, 0(t1) | 94 | lh $r_A, 0(t1) |
95 | .set noreorder | ||
82 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 96 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
97 | # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) | ||
83 | wsbh t0, $r_A | 98 | wsbh t0, $r_A |
84 | seh $r_A, t0 | 99 | seh $r_A, t0 |
100 | # else | ||
101 | sll t0, $r_A, 24 | ||
102 | andi t1, $r_A, 0xff00 | ||
103 | sra t0, t0, 16 | ||
104 | srl t1, t1, 8 | ||
105 | or $r_A, t0, t1 | ||
106 | # endif | ||
85 | #endif | 107 | #endif |
86 | jr $r_ra | 108 | jr $r_ra |
87 | move $r_ret, zero | 109 | move $r_ret, zero |
@@ -89,8 +111,7 @@ sk_load_half_positive: | |||
89 | 111 | ||
90 | LEAF(sk_load_byte) | 112 | LEAF(sk_load_byte) |
91 | is_offset_negative(byte) | 113 | is_offset_negative(byte) |
92 | .globl sk_load_byte_positive | 114 | FEXPORT(sk_load_byte_positive) |
93 | sk_load_byte_positive: | ||
94 | is_offset_in_header(1, byte) | 115 | is_offset_in_header(1, byte) |
95 | /* Offset within header boundaries */ | 116 | /* Offset within header boundaries */ |
96 | PTR_ADDU t1, $r_skb_data, offset | 117 | PTR_ADDU t1, $r_skb_data, offset |
@@ -148,23 +169,47 @@ sk_load_byte_positive: | |||
148 | NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) | 169 | NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) |
149 | bpf_slow_path_common(4) | 170 | bpf_slow_path_common(4) |
150 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 171 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
172 | # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) | ||
151 | wsbh t0, $r_s0 | 173 | wsbh t0, $r_s0 |
152 | jr $r_ra | 174 | jr $r_ra |
153 | rotr $r_A, t0, 16 | 175 | rotr $r_A, t0, 16 |
154 | #endif | 176 | # else |
177 | sll t0, $r_s0, 24 | ||
178 | srl t1, $r_s0, 24 | ||
179 | srl t2, $r_s0, 8 | ||
180 | or t0, t0, t1 | ||
181 | andi t2, t2, 0xff00 | ||
182 | andi t1, $r_s0, 0xff00 | ||
183 | or t0, t0, t2 | ||
184 | sll t1, t1, 8 | ||
185 | jr $r_ra | ||
186 | or $r_A, t0, t1 | ||
187 | # endif | ||
188 | #else | ||
155 | jr $r_ra | 189 | jr $r_ra |
156 | move $r_A, $r_s0 | 190 | move $r_A, $r_s0 |
191 | #endif | ||
157 | 192 | ||
158 | END(bpf_slow_path_word) | 193 | END(bpf_slow_path_word) |
159 | 194 | ||
160 | NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) | 195 | NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) |
161 | bpf_slow_path_common(2) | 196 | bpf_slow_path_common(2) |
162 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 197 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
198 | # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) | ||
163 | jr $r_ra | 199 | jr $r_ra |
164 | wsbh $r_A, $r_s0 | 200 | wsbh $r_A, $r_s0 |
165 | #endif | 201 | # else |
202 | sll t0, $r_s0, 8 | ||
203 | andi t1, $r_s0, 0xff00 | ||
204 | andi t0, t0, 0xff00 | ||
205 | srl t1, t1, 8 | ||
206 | jr $r_ra | ||
207 | or $r_A, t0, t1 | ||
208 | # endif | ||
209 | #else | ||
166 | jr $r_ra | 210 | jr $r_ra |
167 | move $r_A, $r_s0 | 211 | move $r_A, $r_s0 |
212 | #endif | ||
168 | 213 | ||
169 | END(bpf_slow_path_half) | 214 | END(bpf_slow_path_half) |
170 | 215 | ||
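The new "# else" branches above replace the MIPS32r2-only wsbh/rotr sequences with plain shift-and-mask byte swaps so the BPF JIT helpers also work on pre-r2 cores. A user-space C sketch of the same swaps (purely illustrative; the in-tree half-word fast path additionally sign-extends its result):

    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit swap built only from shifts, ANDs and ORs, mirroring the
     * pre-MIPS32r2 fallback that stands in for wsbh + rotr. */
    static uint32_t swab32_sketch(uint32_t v)
    {
        uint32_t t0 = v << 24;             /* byte 0 -> byte 3 */
        uint32_t t1 = v >> 24;             /* byte 3 -> byte 0 */
        uint32_t t2 = (v >> 8) & 0xff00;   /* byte 2 -> byte 1 */
        uint32_t t3 = (v & 0xff00) << 8;   /* byte 1 -> byte 2 */

        return t0 | t1 | t2 | t3;
    }

    /* 16-bit swap corresponding to the bpf_slow_path_half fallback. */
    static uint16_t swab16_sketch(uint16_t v)
    {
        return (uint16_t)((v << 8) | (v >> 8));
    }

    int main(void)
    {
        printf("%08x %04x\n", swab32_sketch(0x11223344), swab16_sketch(0xaabb));
        return 0;
    }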
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index 0136b4f9c9cd..10d86d54880a 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c | |||
@@ -82,7 +82,7 @@ void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action) | |||
82 | } | 82 | } |
83 | 83 | ||
84 | /* IRQ_IPI_SMP_FUNCTION Handler */ | 84 | /* IRQ_IPI_SMP_FUNCTION Handler */ |
85 | void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc) | 85 | void nlm_smp_function_ipi_handler(struct irq_desc *desc) |
86 | { | 86 | { |
87 | unsigned int irq = irq_desc_get_irq(desc); | 87 | unsigned int irq = irq_desc_get_irq(desc); |
88 | clear_c0_eimr(irq); | 88 | clear_c0_eimr(irq); |
@@ -92,7 +92,7 @@ void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc) | |||
92 | } | 92 | } |
93 | 93 | ||
94 | /* IRQ_IPI_SMP_RESCHEDULE handler */ | 94 | /* IRQ_IPI_SMP_RESCHEDULE handler */ |
95 | void nlm_smp_resched_ipi_handler(unsigned int __irq, struct irq_desc *desc) | 95 | void nlm_smp_resched_ipi_handler(struct irq_desc *desc) |
96 | { | 96 | { |
97 | unsigned int irq = irq_desc_get_irq(desc); | 97 | unsigned int irq = irq_desc_get_irq(desc); |
98 | clear_c0_eimr(irq); | 98 | clear_c0_eimr(irq); |
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c index f8d0acb4f973..b4fa6413c4e5 100644 --- a/arch/mips/pci/pci-ar2315.c +++ b/arch/mips/pci/pci-ar2315.c | |||
@@ -318,7 +318,7 @@ static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc) | |||
318 | return 0; | 318 | return 0; |
319 | } | 319 | } |
320 | 320 | ||
321 | static void ar2315_pci_irq_handler(unsigned irq, struct irq_desc *desc) | 321 | static void ar2315_pci_irq_handler(struct irq_desc *desc) |
322 | { | 322 | { |
323 | struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc); | 323 | struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc); |
324 | u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) & | 324 | u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) & |
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c index ad35a5e6a56c..7db963deec73 100644 --- a/arch/mips/pci/pci-ar71xx.c +++ b/arch/mips/pci/pci-ar71xx.c | |||
@@ -226,7 +226,7 @@ static struct pci_ops ar71xx_pci_ops = { | |||
226 | .write = ar71xx_pci_write_config, | 226 | .write = ar71xx_pci_write_config, |
227 | }; | 227 | }; |
228 | 228 | ||
229 | static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc) | 229 | static void ar71xx_pci_irq_handler(struct irq_desc *desc) |
230 | { | 230 | { |
231 | struct ar71xx_pci_controller *apc; | 231 | struct ar71xx_pci_controller *apc; |
232 | void __iomem *base = ath79_reset_base; | 232 | void __iomem *base = ath79_reset_base; |
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c index 907d11dd921b..2013dad700df 100644 --- a/arch/mips/pci/pci-ar724x.c +++ b/arch/mips/pci/pci-ar724x.c | |||
@@ -225,7 +225,7 @@ static struct pci_ops ar724x_pci_ops = { | |||
225 | .write = ar724x_pci_write, | 225 | .write = ar724x_pci_write, |
226 | }; | 226 | }; |
227 | 227 | ||
228 | static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc) | 228 | static void ar724x_pci_irq_handler(struct irq_desc *desc) |
229 | { | 229 | { |
230 | struct ar724x_pci_controller *apc; | 230 | struct ar724x_pci_controller *apc; |
231 | void __iomem *base; | 231 | void __iomem *base; |
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 53c8efaf1572..ed6732f9aa87 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c | |||
@@ -129,7 +129,7 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc, | |||
129 | rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA); | 129 | rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA); |
130 | } | 130 | } |
131 | 131 | ||
132 | static void rt3883_pci_irq_handler(unsigned int __irq, struct irq_desc *desc) | 132 | static void rt3883_pci_irq_handler(struct irq_desc *desc) |
133 | { | 133 | { |
134 | struct rt3883_pci_controller *rpc; | 134 | struct rt3883_pci_controller *rpc; |
135 | u32 pending; | 135 | u32 pending; |
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index c6996cf67a5c..b8a0bf5766f2 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c | |||
@@ -311,6 +311,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) | |||
311 | 311 | ||
312 | void pcibios_fixup_bus(struct pci_bus *bus) | 312 | void pcibios_fixup_bus(struct pci_bus *bus) |
313 | { | 313 | { |
314 | struct pci_dev *dev = bus->self; | ||
315 | |||
316 | if (pci_has_flag(PCI_PROBE_ONLY) && dev && | ||
317 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
318 | pci_read_bridge_bases(bus); | ||
319 | } | ||
314 | } | 320 | } |
315 | 321 | ||
316 | EXPORT_SYMBOL(PCIBIOS_MIN_IO); | 322 | EXPORT_SYMBOL(PCIBIOS_MIN_IO); |
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 8c624a8b9ea2..4cf77f358395 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c | |||
@@ -96,7 +96,7 @@ unsigned int get_c0_compare_int(void) | |||
96 | return CP0_LEGACY_COMPARE_IRQ; | 96 | return CP0_LEGACY_COMPARE_IRQ; |
97 | } | 97 | } |
98 | 98 | ||
99 | static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc) | 99 | static void ralink_intc_irq_handler(struct irq_desc *desc) |
100 | { | 100 | { |
101 | u32 pending = rt_intc_r32(INTC_REG_STATUS0); | 101 | u32 pending = rt_intc_r32(INTC_REG_STATUS0); |
102 | 102 | ||
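Every irq_desc handler change in this series (netlogic, ar2315, ar71xx, ar724x, rt3883 and ralink above, plus the powerpc cascades further down) is the same API migration: chained flow handlers drop their "unsigned int irq" argument and receive only the descriptor, recovering the irq number with irq_desc_get_irq() when they still need it. A hedged kernel-context sketch of the resulting handler shape (the controller state and its pending-register handling are placeholders):

    #include <linux/irq.h>
    #include <linux/irqdesc.h>
    #include <linux/irqchip/chained_irq.h>

    /* Post-migration chained handler: chip, handler data and the virq
     * number all come from the descriptor. */
    static void example_cascade_handler(struct irq_desc *desc)
    {
        struct irq_chip *chip = irq_desc_get_chip(desc);
        void *ctrl = irq_desc_get_handler_data(desc);  /* driver state */
        unsigned int irq = irq_desc_get_irq(desc);     /* former 1st arg */

        chained_irq_enter(chip, desc);
        /* read the controller's pending bits via 'ctrl' and call
         * generic_handle_irq() for each child interrupt here */
        (void)ctrl;
        (void)irq;
        chained_irq_exit(chip, desc);
    }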
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 6edb9ee6128e..1c8dd0f5cd5d 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild | |||
@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h | |||
9 | generic-y += preempt.h | 9 | generic-y += preempt.h |
10 | generic-y += sections.h | 10 | generic-y += sections.h |
11 | generic-y += trace_clock.h | 11 | generic-y += trace_clock.h |
12 | generic-y += word-at-a-time.h | ||
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c index deaa893efba5..3dfe2d31c67b 100644 --- a/arch/mn10300/unit-asb2305/pci.c +++ b/arch/mn10300/unit-asb2305/pci.c | |||
@@ -324,6 +324,7 @@ void pcibios_fixup_bus(struct pci_bus *bus) | |||
324 | struct pci_dev *dev; | 324 | struct pci_dev *dev; |
325 | 325 | ||
326 | if (bus->self) { | 326 | if (bus->self) { |
327 | pci_read_bridge_bases(bus); | ||
327 | pcibios_fixup_bridge_resources(bus->self); | 328 | pcibios_fixup_bridge_resources(bus->self); |
328 | } | 329 | } |
329 | 330 | ||
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 914864eb5a25..d63330e88379 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild | |||
@@ -61,4 +61,5 @@ generic-y += types.h | |||
61 | generic-y += unaligned.h | 61 | generic-y += unaligned.h |
62 | generic-y += user.h | 62 | generic-y += user.h |
63 | generic-y += vga.h | 63 | generic-y += vga.h |
64 | generic-y += word-at-a-time.h | ||
64 | generic-y += xor.h | 65 | generic-y += xor.h |
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 73eddda53b8e..4eec430d8fa8 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile | |||
@@ -28,6 +28,9 @@ BOOTCFLAGS += -m64 | |||
28 | endif | 28 | endif |
29 | ifdef CONFIG_CPU_BIG_ENDIAN | 29 | ifdef CONFIG_CPU_BIG_ENDIAN |
30 | BOOTCFLAGS += -mbig-endian | 30 | BOOTCFLAGS += -mbig-endian |
31 | else | ||
32 | BOOTCFLAGS += -mlittle-endian | ||
33 | BOOTCFLAGS += $(call cc-option,-mabi=elfv2) | ||
31 | endif | 34 | endif |
32 | 35 | ||
33 | BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc | 36 | BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc |
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 6bc0ee4b1070..2c041b535a64 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig | |||
@@ -111,7 +111,7 @@ CONFIG_SCSI_QLA_FC=m | |||
111 | CONFIG_SCSI_QLA_ISCSI=m | 111 | CONFIG_SCSI_QLA_ISCSI=m |
112 | CONFIG_SCSI_LPFC=m | 112 | CONFIG_SCSI_LPFC=m |
113 | CONFIG_SCSI_VIRTIO=m | 113 | CONFIG_SCSI_VIRTIO=m |
114 | CONFIG_SCSI_DH=m | 114 | CONFIG_SCSI_DH=y |
115 | CONFIG_SCSI_DH_RDAC=m | 115 | CONFIG_SCSI_DH_RDAC=m |
116 | CONFIG_SCSI_DH_ALUA=m | 116 | CONFIG_SCSI_DH_ALUA=m |
117 | CONFIG_ATA=y | 117 | CONFIG_ATA=y |
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 7991f37e5fe2..36871a4bfa54 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig | |||
@@ -114,7 +114,7 @@ CONFIG_SCSI_QLA_FC=m | |||
114 | CONFIG_SCSI_QLA_ISCSI=m | 114 | CONFIG_SCSI_QLA_ISCSI=m |
115 | CONFIG_SCSI_LPFC=m | 115 | CONFIG_SCSI_LPFC=m |
116 | CONFIG_SCSI_VIRTIO=m | 116 | CONFIG_SCSI_VIRTIO=m |
117 | CONFIG_SCSI_DH=m | 117 | CONFIG_SCSI_DH=y |
118 | CONFIG_SCSI_DH_RDAC=m | 118 | CONFIG_SCSI_DH_RDAC=m |
119 | CONFIG_SCSI_DH_ALUA=m | 119 | CONFIG_SCSI_DH_ALUA=m |
120 | CONFIG_ATA=y | 120 | CONFIG_ATA=y |
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 98eebbf66340..827a38d7a9db 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h | |||
@@ -44,6 +44,7 @@ | |||
44 | #ifdef CONFIG_KVM_MMIO | 44 | #ifdef CONFIG_KVM_MMIO |
45 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 45 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
46 | #endif | 46 | #endif |
47 | #define KVM_HALT_POLL_NS_DEFAULT 500000 | ||
47 | 48 | ||
48 | /* These values are internal and can be increased later */ | 49 | /* These values are internal and can be increased later */ |
49 | #define KVM_NR_IRQCHIPS 1 | 50 | #define KVM_NR_IRQCHIPS 1 |
@@ -108,6 +109,7 @@ struct kvm_vcpu_stat { | |||
108 | u32 dec_exits; | 109 | u32 dec_exits; |
109 | u32 ext_intr_exits; | 110 | u32 ext_intr_exits; |
110 | u32 halt_successful_poll; | 111 | u32 halt_successful_poll; |
112 | u32 halt_attempted_poll; | ||
111 | u32 halt_wakeup; | 113 | u32 halt_wakeup; |
112 | u32 dbell_exits; | 114 | u32 dbell_exits; |
113 | u32 gdbell_exits; | 115 | u32 gdbell_exits; |
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index cab6753f1be5..3f191f573d4f 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h | |||
@@ -61,8 +61,13 @@ struct machdep_calls { | |||
61 | unsigned long addr, | 61 | unsigned long addr, |
62 | unsigned char *hpte_slot_array, | 62 | unsigned char *hpte_slot_array, |
63 | int psize, int ssize, int local); | 63 | int psize, int ssize, int local); |
64 | /* special for kexec, to be called in real mode, linear mapping is | 64 | /* |
65 | * destroyed as well */ | 65 | * Special for kexec. |
66 | * To be called in real mode with interrupts disabled. No locks are | ||
67 | * taken; as such, concurrent access on pre-POWER5 hardware could result | ||
68 | * in a deadlock. | ||
69 | * The linear mapping is destroyed as well. | ||
70 | */ | ||
66 | void (*hpte_clear_all)(void); | 71 | void (*hpte_clear_all)(void); |
67 | 72 | ||
68 | void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, | 73 | void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, |
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h index 25784cc959a0..1e155ca6d33c 100644 --- a/arch/powerpc/include/asm/qe_ic.h +++ b/arch/powerpc/include/asm/qe_ic.h | |||
@@ -59,14 +59,14 @@ enum qe_ic_grp_id { | |||
59 | 59 | ||
60 | #ifdef CONFIG_QUICC_ENGINE | 60 | #ifdef CONFIG_QUICC_ENGINE |
61 | void qe_ic_init(struct device_node *node, unsigned int flags, | 61 | void qe_ic_init(struct device_node *node, unsigned int flags, |
62 | void (*low_handler)(unsigned int irq, struct irq_desc *desc), | 62 | void (*low_handler)(struct irq_desc *desc), |
63 | void (*high_handler)(unsigned int irq, struct irq_desc *desc)); | 63 | void (*high_handler)(struct irq_desc *desc)); |
64 | unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); | 64 | unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); |
65 | unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); | 65 | unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); |
66 | #else | 66 | #else |
67 | static inline void qe_ic_init(struct device_node *node, unsigned int flags, | 67 | static inline void qe_ic_init(struct device_node *node, unsigned int flags, |
68 | void (*low_handler)(unsigned int irq, struct irq_desc *desc), | 68 | void (*low_handler)(struct irq_desc *desc), |
69 | void (*high_handler)(unsigned int irq, struct irq_desc *desc)) | 69 | void (*high_handler)(struct irq_desc *desc)) |
70 | {} | 70 | {} |
71 | static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) | 71 | static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) |
72 | { return 0; } | 72 | { return 0; } |
@@ -78,8 +78,7 @@ void qe_ic_set_highest_priority(unsigned int virq, int high); | |||
78 | int qe_ic_set_priority(unsigned int virq, unsigned int priority); | 78 | int qe_ic_set_priority(unsigned int virq, unsigned int priority); |
79 | int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); | 79 | int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); |
80 | 80 | ||
81 | static inline void qe_ic_cascade_low_ipic(unsigned int irq, | 81 | static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc) |
82 | struct irq_desc *desc) | ||
83 | { | 82 | { |
84 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); | 83 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); |
85 | unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); | 84 | unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); |
@@ -88,8 +87,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq, | |||
88 | generic_handle_irq(cascade_irq); | 87 | generic_handle_irq(cascade_irq); |
89 | } | 88 | } |
90 | 89 | ||
91 | static inline void qe_ic_cascade_high_ipic(unsigned int irq, | 90 | static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc) |
92 | struct irq_desc *desc) | ||
93 | { | 91 | { |
94 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); | 92 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); |
95 | unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); | 93 | unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); |
@@ -98,8 +96,7 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq, | |||
98 | generic_handle_irq(cascade_irq); | 96 | generic_handle_irq(cascade_irq); |
99 | } | 97 | } |
100 | 98 | ||
101 | static inline void qe_ic_cascade_low_mpic(unsigned int irq, | 99 | static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc) |
102 | struct irq_desc *desc) | ||
103 | { | 100 | { |
104 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); | 101 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); |
105 | unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); | 102 | unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); |
@@ -111,8 +108,7 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq, | |||
111 | chip->irq_eoi(&desc->irq_data); | 108 | chip->irq_eoi(&desc->irq_data); |
112 | } | 109 | } |
113 | 110 | ||
114 | static inline void qe_ic_cascade_high_mpic(unsigned int irq, | 111 | static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc) |
115 | struct irq_desc *desc) | ||
116 | { | 112 | { |
117 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); | 113 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); |
118 | unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); | 114 | unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); |
@@ -124,8 +120,7 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq, | |||
124 | chip->irq_eoi(&desc->irq_data); | 120 | chip->irq_eoi(&desc->irq_data); |
125 | } | 121 | } |
126 | 122 | ||
127 | static inline void qe_ic_cascade_muxed_mpic(unsigned int irq, | 123 | static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) |
128 | struct irq_desc *desc) | ||
129 | { | 124 | { |
130 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); | 125 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); |
131 | unsigned int cascade_irq; | 126 | unsigned int cascade_irq; |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 71f2b3f02cf8..126d0c4f9b7d 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
@@ -368,3 +368,5 @@ SYSCALL_SPU(memfd_create) | |||
368 | SYSCALL_SPU(bpf) | 368 | SYSCALL_SPU(bpf) |
369 | COMPAT_SYS(execveat) | 369 | COMPAT_SYS(execveat) |
370 | PPC64ONLY(switch_endian) | 370 | PPC64ONLY(switch_endian) |
371 | SYSCALL_SPU(userfaultfd) | ||
372 | SYSCALL_SPU(membarrier) | ||
diff --git a/arch/powerpc/include/asm/tsi108_pci.h b/arch/powerpc/include/asm/tsi108_pci.h index 5653d7cc3e24..ae59d5b672b0 100644 --- a/arch/powerpc/include/asm/tsi108_pci.h +++ b/arch/powerpc/include/asm/tsi108_pci.h | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary); | 40 | extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary); |
41 | extern void tsi108_pci_int_init(struct device_node *node); | 41 | extern void tsi108_pci_int_init(struct device_node *node); |
42 | extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc); | 42 | extern void tsi108_irq_cascade(struct irq_desc *desc); |
43 | extern void tsi108_clear_pci_cfg_error(void); | 43 | extern void tsi108_clear_pci_cfg_error(void); |
44 | 44 | ||
45 | #endif /* _ASM_POWERPC_TSI108_PCI_H */ | 45 | #endif /* _ASM_POWERPC_TSI108_PCI_H */ |
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index f4f8b667d75b..13411be86041 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <uapi/asm/unistd.h> | 12 | #include <uapi/asm/unistd.h> |
13 | 13 | ||
14 | 14 | ||
15 | #define __NR_syscalls 364 | 15 | #define __NR_syscalls 366 |
16 | 16 | ||
17 | #define __NR__exit __NR_exit | 17 | #define __NR__exit __NR_exit |
18 | #define NR_syscalls __NR_syscalls | 18 | #define NR_syscalls __NR_syscalls |
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index 5b3a903adae6..e4396a7d0f7c 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h | |||
@@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct | |||
40 | return (val + c->high_bits) & ~rhs; | 40 | return (val + c->high_bits) & ~rhs; |
41 | } | 41 | } |
42 | 42 | ||
43 | static inline unsigned long zero_bytemask(unsigned long mask) | ||
44 | { | ||
45 | return ~1ul << __fls(mask); | ||
46 | } | ||
47 | |||
43 | #else | 48 | #else |
44 | 49 | ||
45 | #ifdef CONFIG_64BIT | 50 | #ifdef CONFIG_64BIT |
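The powerpc word-at-a-time header gains zero_bytemask(), used by generic word-at-a-time callers (for instance the name hashing in fs/namei.c) to build a mask that stops at the first zero byte of a word; here "~1ul << __fls(mask)" provides it for the big-endian variant. The idea is easier to see in the classic little-endian formulation, sketched below in portable user-space C (an illustration of the concept, not the powerpc implementation):

    #include <stdint.h>
    #include <stdio.h>

    #define ONES  0x0101010101010101ULL
    #define HIGHS 0x8080808080808080ULL

    /* Sets the high bit of the first zero byte of 'word' (and possibly
     * of later bytes); no earlier byte is ever flagged, so the lowest
     * set bit of the result is reliable. */
    static uint64_t has_zero_sketch(uint64_t word)
    {
        return (word - ONES) & ~word & HIGHS;
    }

    /* Mask covering every byte up to and including the first zero byte. */
    static uint64_t zero_bytemask_sketch(uint64_t mask)
    {
        return mask ^ (mask - 1);   /* all bits up to the lowest set bit */
    }

    int main(void)
    {
        uint64_t w = 0x00656d616eULL;   /* "name\0..." as a LE word */
        uint64_t m = has_zero_sketch(w);

        printf("mask=%016llx keep=%016llx\n",
               (unsigned long long)m,
               (unsigned long long)zero_bytemask_sketch(m));
        return 0;
    }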
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index e4aa173dae62..6337738018aa 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h | |||
@@ -386,5 +386,7 @@ | |||
386 | #define __NR_bpf 361 | 386 | #define __NR_bpf 361 |
387 | #define __NR_execveat 362 | 387 | #define __NR_execveat 362 |
388 | #define __NR_switch_endian 363 | 388 | #define __NR_switch_endian 363 |
389 | #define __NR_userfaultfd 364 | ||
390 | #define __NR_membarrier 365 | ||
389 | 391 | ||
390 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ | 392 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 45096033d37b..290559df1e8b 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -441,7 +441,7 @@ void migrate_irqs(void) | |||
441 | 441 | ||
442 | chip = irq_data_get_irq_chip(data); | 442 | chip = irq_data_get_irq_chip(data); |
443 | 443 | ||
444 | cpumask_and(mask, data->affinity, map); | 444 | cpumask_and(mask, irq_data_get_affinity_mask(data), map); |
445 | if (cpumask_any(mask) >= nr_cpu_ids) { | 445 | if (cpumask_any(mask) >= nr_cpu_ids) { |
446 | pr_warn("Breaking affinity for irq %i\n", irq); | 446 | pr_warn("Breaking affinity for irq %i\n", irq); |
447 | cpumask_copy(mask, map); | 447 | cpumask_copy(mask, map); |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index a1d0632d97c6..7587b2ae5f77 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -1032,7 +1032,13 @@ void pcibios_set_master(struct pci_dev *dev) | |||
1032 | 1032 | ||
1033 | void pcibios_fixup_bus(struct pci_bus *bus) | 1033 | void pcibios_fixup_bus(struct pci_bus *bus) |
1034 | { | 1034 | { |
1035 | /* Fixup the bus */ | 1035 | /* When called from the generic PCI probe, read PCI<->PCI bridge |
1036 | * bases. This is -not- called when generating the PCI tree from | ||
1037 | * the OF device-tree. | ||
1038 | */ | ||
1039 | pci_read_bridge_bases(bus); | ||
1040 | |||
1041 | /* Now fixup the bus */ | ||
1036 | pcibios_setup_bus_self(bus); | 1042 | pcibios_setup_bus_self(bus); |
1037 | 1043 | ||
1038 | /* Now fixup devices on that bus */ | 1044 | /* Now fixup devices on that bus */ |
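The mips, mn10300 and powerpc hunks in this series that add pci_read_bridge_bases() all do the same thing: each pcibios_fixup_bus() now reads the PCI-to-PCI bridge windows itself during the generic probe, as the new powerpc comment above spells out. A simplified kernel-context sketch of the pattern (platform-specific fixups are elided):

    #include <linux/pci.h>

    void pcibios_fixup_bus(struct pci_bus *bus)
    {
        /* bus->self is NULL for the root bus; only P2P bridges have
         * windows to read back from config space. */
        if (bus->self)
            pci_read_bridge_bases(bus);

        /* platform-specific resource fixups would follow here */
    }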
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index bb02e9f6944e..ad8c9db61237 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/udbg.h> | 38 | #include <asm/udbg.h> |
39 | #include <asm/mmu_context.h> | 39 | #include <asm/mmu_context.h> |
40 | #include <asm/epapr_hcalls.h> | 40 | #include <asm/epapr_hcalls.h> |
41 | #include <asm/code-patching.h> | ||
41 | 42 | ||
42 | #define DBG(fmt...) | 43 | #define DBG(fmt...) |
43 | 44 | ||
@@ -109,6 +110,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) | |||
109 | * This is called very early on the boot process, after a minimal | 110 | * This is called very early on the boot process, after a minimal |
110 | * MMU environment has been set up but before MMU_init is called. | 111 | * MMU environment has been set up but before MMU_init is called. |
111 | */ | 112 | */ |
113 | extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */ | ||
114 | |||
112 | notrace void __init machine_init(u64 dt_ptr) | 115 | notrace void __init machine_init(u64 dt_ptr) |
113 | { | 116 | { |
114 | lockdep_init(); | 117 | lockdep_init(); |
@@ -116,6 +119,9 @@ notrace void __init machine_init(u64 dt_ptr) | |||
116 | /* Enable early debugging if any specified (see udbg.h) */ | 119 | /* Enable early debugging if any specified (see udbg.h) */ |
117 | udbg_early_init(); | 120 | udbg_early_init(); |
118 | 121 | ||
122 | patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP); | ||
123 | patch_instruction(&memset_nocache_branch, PPC_INST_NOP); | ||
124 | |||
119 | /* Do some early initialization based on the flat device tree */ | 125 | /* Do some early initialization based on the flat device tree */ |
120 | early_init_devtree(__va(dt_ptr)); | 126 | early_init_devtree(__va(dt_ptr)); |
121 | 127 | ||
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index d75bf325f54a..099c79d8c160 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c | |||
@@ -53,6 +53,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
53 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, | 53 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, |
54 | { "queue_intr", VCPU_STAT(queue_intr) }, | 54 | { "queue_intr", VCPU_STAT(queue_intr) }, |
55 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll), }, | 55 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll), }, |
56 | { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), }, | ||
56 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 57 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
57 | { "pf_storage", VCPU_STAT(pf_storage) }, | 58 | { "pf_storage", VCPU_STAT(pf_storage) }, |
58 | { "sp_storage", VCPU_STAT(sp_storage) }, | 59 | { "sp_storage", VCPU_STAT(sp_storage) }, |
@@ -828,12 +829,15 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu) | |||
828 | unsigned long size = kvmppc_get_gpr(vcpu, 4); | 829 | unsigned long size = kvmppc_get_gpr(vcpu, 4); |
829 | unsigned long addr = kvmppc_get_gpr(vcpu, 5); | 830 | unsigned long addr = kvmppc_get_gpr(vcpu, 5); |
830 | u64 buf; | 831 | u64 buf; |
832 | int srcu_idx; | ||
831 | int ret; | 833 | int ret; |
832 | 834 | ||
833 | if (!is_power_of_2(size) || (size > sizeof(buf))) | 835 | if (!is_power_of_2(size) || (size > sizeof(buf))) |
834 | return H_TOO_HARD; | 836 | return H_TOO_HARD; |
835 | 837 | ||
838 | srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
836 | ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf); | 839 | ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf); |
840 | srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); | ||
837 | if (ret != 0) | 841 | if (ret != 0) |
838 | return H_TOO_HARD; | 842 | return H_TOO_HARD; |
839 | 843 | ||
@@ -868,6 +872,7 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu) | |||
868 | unsigned long addr = kvmppc_get_gpr(vcpu, 5); | 872 | unsigned long addr = kvmppc_get_gpr(vcpu, 5); |
869 | unsigned long val = kvmppc_get_gpr(vcpu, 6); | 873 | unsigned long val = kvmppc_get_gpr(vcpu, 6); |
870 | u64 buf; | 874 | u64 buf; |
875 | int srcu_idx; | ||
871 | int ret; | 876 | int ret; |
872 | 877 | ||
873 | switch (size) { | 878 | switch (size) { |
@@ -891,7 +896,9 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu) | |||
891 | return H_TOO_HARD; | 896 | return H_TOO_HARD; |
892 | } | 897 | } |
893 | 898 | ||
899 | srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
894 | ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf); | 900 | ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf); |
901 | srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); | ||
895 | if (ret != 0) | 902 | if (ret != 0) |
896 | return H_TOO_HARD; | 903 | return H_TOO_HARD; |
897 | 904 | ||
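kvmppc_h_logical_ci_load()/store() now wrap their kvm_io_bus_read()/write() calls in an SRCU read-side critical section on kvm->srcu, which is what protects the I/O bus structures those helpers walk. A minimal kernel-context sketch of the pattern added here (the wrapper name is made up for illustration):

    #include <linux/kvm_host.h>

    /* Any kvm_io_bus_{read,write}() call must sit inside an SRCU
     * read-side critical section on kvm->srcu. */
    static int read_guest_mmio(struct kvm_vcpu *vcpu, gpa_t addr,
                               int len, void *val)
    {
        int srcu_idx, ret;

        srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, len, val);
        srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

        return ret;
    }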
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 9754e6815e52..228049786888 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -2692,9 +2692,13 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
2692 | 2692 | ||
2693 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && | 2693 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && |
2694 | (vc->vcore_state == VCORE_RUNNING || | 2694 | (vc->vcore_state == VCORE_RUNNING || |
2695 | vc->vcore_state == VCORE_EXITING)) | 2695 | vc->vcore_state == VCORE_EXITING || |
2696 | vc->vcore_state == VCORE_PIGGYBACK)) | ||
2696 | kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); | 2697 | kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); |
2697 | 2698 | ||
2699 | if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) | ||
2700 | kvmppc_vcore_end_preempt(vc); | ||
2701 | |||
2698 | if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { | 2702 | if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { |
2699 | kvmppc_remove_runnable(vc, vcpu); | 2703 | kvmppc_remove_runnable(vc, vcpu); |
2700 | vcpu->stat.signal_exits++; | 2704 | vcpu->stat.signal_exits++; |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 2273dcacef39..b98889e9851d 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -1257,6 +1257,7 @@ mc_cont: | |||
1257 | bl kvmhv_accumulate_time | 1257 | bl kvmhv_accumulate_time |
1258 | #endif | 1258 | #endif |
1259 | 1259 | ||
1260 | mr r3, r12 | ||
1260 | /* Increment exit count, poke other threads to exit */ | 1261 | /* Increment exit count, poke other threads to exit */ |
1261 | bl kvmhv_commence_exit | 1262 | bl kvmhv_commence_exit |
1262 | nop | 1263 | nop |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index ae458f0fd061..fd5875179e5c 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
63 | { "dec", VCPU_STAT(dec_exits) }, | 63 | { "dec", VCPU_STAT(dec_exits) }, |
64 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, | 64 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, |
65 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, | 65 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, |
66 | { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, | ||
66 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 67 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
67 | { "doorbell", VCPU_STAT(dbell_exits) }, | 68 | { "doorbell", VCPU_STAT(dbell_exits) }, |
68 | { "guest doorbell", VCPU_STAT(gdbell_exits) }, | 69 | { "guest doorbell", VCPU_STAT(gdbell_exits) }, |
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S index 2ef50c629470..c44df2dbedd5 100644 --- a/arch/powerpc/lib/copy_32.S +++ b/arch/powerpc/lib/copy_32.S | |||
@@ -73,6 +73,10 @@ CACHELINE_MASK = (L1_CACHE_BYTES-1) | |||
73 | * Use dcbz on the complete cache lines in the destination | 73 | * Use dcbz on the complete cache lines in the destination |
74 | * to set them to zero. This requires that the destination | 74 | * to set them to zero. This requires that the destination |
75 | * area is cacheable. -- paulus | 75 | * area is cacheable. -- paulus |
76 | * | ||
77 | * During early init, cache might not be active yet, so dcbz cannot be used. | ||
78 | * We therefore skip the optimised block that uses dcbz. This jump is | ||
79 | * replaced by a nop once cache is active. This is done in machine_init() | ||
76 | */ | 80 | */ |
77 | _GLOBAL(memset) | 81 | _GLOBAL(memset) |
78 | rlwimi r4,r4,8,16,23 | 82 | rlwimi r4,r4,8,16,23 |
@@ -88,6 +92,8 @@ _GLOBAL(memset) | |||
88 | subf r6,r0,r6 | 92 | subf r6,r0,r6 |
89 | cmplwi 0,r4,0 | 93 | cmplwi 0,r4,0 |
90 | bne 2f /* Use normal procedure if r4 is not zero */ | 94 | bne 2f /* Use normal procedure if r4 is not zero */ |
95 | _GLOBAL(memset_nocache_branch) | ||
96 | b 2f /* Skip optimised block until cache is enabled */ | ||
91 | 97 | ||
92 | clrlwi r7,r6,32-LG_CACHELINE_BYTES | 98 | clrlwi r7,r6,32-LG_CACHELINE_BYTES |
93 | add r8,r7,r5 | 99 | add r8,r7,r5 |
@@ -128,6 +134,10 @@ _GLOBAL(memset) | |||
128 | * the destination area is cacheable. | 134 | * the destination area is cacheable. |
129 | * We only use this version if the source and dest don't overlap. | 135 | * We only use this version if the source and dest don't overlap. |
130 | * -- paulus. | 136 | * -- paulus. |
137 | * | ||
138 | * During early init, cache might not be active yet, so dcbz cannot be used. | ||
139 | * We therefore jump to generic_memcpy which doesn't use dcbz. This jump is | ||
140 | * replaced by a nop once cache is active. This is done in machine_init() | ||
131 | */ | 141 | */ |
132 | _GLOBAL(memmove) | 142 | _GLOBAL(memmove) |
133 | cmplw 0,r3,r4 | 143 | cmplw 0,r3,r4 |
@@ -135,6 +145,7 @@ _GLOBAL(memmove) | |||
135 | /* fall through */ | 145 | /* fall through */ |
136 | 146 | ||
137 | _GLOBAL(memcpy) | 147 | _GLOBAL(memcpy) |
148 | b generic_memcpy | ||
138 | add r7,r3,r5 /* test if the src & dst overlap */ | 149 | add r7,r3,r5 /* test if the src & dst overlap */ |
139 | add r8,r4,r5 | 150 | add r8,r4,r5 |
140 | cmplw 0,r4,r7 | 151 | cmplw 0,r4,r7 |
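Taken together with the setup_32.c hunk earlier, the idea is: dcbz faults while the data cache is not yet usable, so memcpy initially branches straight to generic_memcpy and memset initially skips its cache-line-zeroing loop; once machine_init() runs with caches on, both branches are patched into NOPs and the dcbz fast paths come back. A hedged C sketch of that patch step (the helper name is invented; patch_instruction() and PPC_INST_NOP are the kernel's own, per the diff):

    #include <linux/string.h>
    #include <asm/code-patching.h>
    #include <asm/ppc-opcode.h>

    extern unsigned int memset_nocache_branch;  /* branch inside memset */

    /* Once the cache is up, turn the "skip the dcbz fast path" branches
     * into NOPs so memcpy/memset use their optimised loops again. */
    static void enable_dcbz_fastpaths(void)
    {
        patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
        patch_instruction(&memset_nocache_branch, PPC_INST_NOP);
    }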
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 13befa35d8a8..c8822af10a58 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c | |||
@@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot, | |||
582 | * be when they isi), and we are the only one left. We rely on our kernel | 582 | * be when they isi), and we are the only one left. We rely on our kernel |
583 | * mapping being 0xC0's and the hardware ignoring those two real bits. | 583 | * mapping being 0xC0's and the hardware ignoring those two real bits. |
584 | * | 584 | * |
585 | * This must be called with interrupts disabled. | ||
586 | * | ||
587 | * Taking the native_tlbie_lock is unsafe here due to the possibility of | ||
588 | * lockdep being on. On pre POWER5 hardware, not taking the lock could | ||
589 | * cause deadlock. POWER5 and newer not taking the lock is fine. This only | ||
590 | * gets called during boot before secondary CPUs have come up and during | ||
591 | * crashdump and all bets are off anyway. | ||
592 | * | ||
585 | * TODO: add batching support when enabled. remember, no dynamic memory here, | 593 | * TODO: add batching support when enabled. remember, no dynamic memory here, |
586 | * although there is the control page available... | 594 | * although there is the control page available... |
587 | */ | 595 | */ |
588 | static void native_hpte_clear(void) | 596 | static void native_hpte_clear(void) |
589 | { | 597 | { |
590 | unsigned long vpn = 0; | 598 | unsigned long vpn = 0; |
591 | unsigned long slot, slots, flags; | 599 | unsigned long slot, slots; |
592 | struct hash_pte *hptep = htab_address; | 600 | struct hash_pte *hptep = htab_address; |
593 | unsigned long hpte_v; | 601 | unsigned long hpte_v; |
594 | unsigned long pteg_count; | 602 | unsigned long pteg_count; |
@@ -596,13 +604,6 @@ static void native_hpte_clear(void) | |||
596 | 604 | ||
597 | pteg_count = htab_hash_mask + 1; | 605 | pteg_count = htab_hash_mask + 1; |
598 | 606 | ||
599 | local_irq_save(flags); | ||
600 | |||
601 | /* we take the tlbie lock and hold it. Some hardware will | ||
602 | * deadlock if we try to tlbie from two processors at once. | ||
603 | */ | ||
604 | raw_spin_lock(&native_tlbie_lock); | ||
605 | |||
606 | slots = pteg_count * HPTES_PER_GROUP; | 607 | slots = pteg_count * HPTES_PER_GROUP; |
607 | 608 | ||
608 | for (slot = 0; slot < slots; slot++, hptep++) { | 609 | for (slot = 0; slot < slots; slot++, hptep++) { |
@@ -614,8 +615,8 @@ static void native_hpte_clear(void) | |||
614 | hpte_v = be64_to_cpu(hptep->v); | 615 | hpte_v = be64_to_cpu(hptep->v); |
615 | 616 | ||
616 | /* | 617 | /* |
617 | * Call __tlbie() here rather than tlbie() since we | 618 | * Call __tlbie() here rather than tlbie() since we can't take the |
618 | * already hold the native_tlbie_lock. | 619 | * native_tlbie_lock. |
619 | */ | 620 | */ |
620 | if (hpte_v & HPTE_V_VALID) { | 621 | if (hpte_v & HPTE_V_VALID) { |
621 | hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); | 622 | hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); |
@@ -625,8 +626,6 @@ static void native_hpte_clear(void) | |||
625 | } | 626 | } |
626 | 627 | ||
627 | asm volatile("eieio; tlbsync; ptesync":::"memory"); | 628 | asm volatile("eieio; tlbsync; ptesync":::"memory"); |
628 | raw_spin_unlock(&native_tlbie_lock); | ||
629 | local_irq_restore(flags); | ||
630 | } | 629 | } |
631 | 630 | ||
632 | /* | 631 | /* |
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c index 43dafb9d6a46..4d87122cf6a7 100644 --- a/arch/powerpc/mm/hugepage-hash64.c +++ b/arch/powerpc/mm/hugepage-hash64.c | |||
@@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, | |||
85 | BUG_ON(index >= 4096); | 85 | BUG_ON(index >= 4096); |
86 | 86 | ||
87 | vpn = hpt_vpn(ea, vsid, ssize); | 87 | vpn = hpt_vpn(ea, vsid, ssize); |
88 | hash = hpt_hash(vpn, shift, ssize); | ||
89 | hpte_slot_array = get_hpte_slot_array(pmdp); | 88 | hpte_slot_array = get_hpte_slot_array(pmdp); |
90 | if (psize == MMU_PAGE_4K) { | 89 | if (psize == MMU_PAGE_4K) { |
91 | /* | 90 | /* |
@@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, | |||
101 | valid = hpte_valid(hpte_slot_array, index); | 100 | valid = hpte_valid(hpte_slot_array, index); |
102 | if (valid) { | 101 | if (valid) { |
103 | /* update the hpte bits */ | 102 | /* update the hpte bits */ |
103 | hash = hpt_hash(vpn, shift, ssize); | ||
104 | hidx = hpte_hash_index(hpte_slot_array, index); | 104 | hidx = hpte_hash_index(hpte_slot_array, index); |
105 | if (hidx & _PTEIDX_SECONDARY) | 105 | if (hidx & _PTEIDX_SECONDARY) |
106 | hash = ~hash; | 106 | hash = ~hash; |
@@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, | |||
126 | if (!valid) { | 126 | if (!valid) { |
127 | unsigned long hpte_group; | 127 | unsigned long hpte_group; |
128 | 128 | ||
129 | hash = hpt_hash(vpn, shift, ssize); | ||
129 | /* insert new entry */ | 130 | /* insert new entry */ |
130 | pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; | 131 | pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; |
131 | new_pmd |= _PAGE_HASHPTE; | 132 | new_pmd |= _PAGE_HASHPTE; |
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index 11090ab4bf59..0035d146df73 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | |||
@@ -104,9 +104,10 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp, | |||
104 | return irq_linear_revmap(cpld_pic_host, cpld_irq); | 104 | return irq_linear_revmap(cpld_pic_host, cpld_irq); |
105 | } | 105 | } |
106 | 106 | ||
107 | static void | 107 | static void cpld_pic_cascade(struct irq_desc *desc) |
108 | cpld_pic_cascade(unsigned int irq, struct irq_desc *desc) | ||
109 | { | 108 | { |
109 | unsigned int irq; | ||
110 | |||
110 | irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, | 111 | irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, |
111 | &cpld_regs->pci_mask); | 112 | &cpld_regs->pci_mask); |
112 | if (irq != NO_IRQ) { | 113 | if (irq != NO_IRQ) { |
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 32cae33c4266..8fb95480fd73 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c | |||
@@ -80,7 +80,7 @@ static struct irq_chip media5200_irq_chip = { | |||
80 | .irq_mask_ack = media5200_irq_mask, | 80 | .irq_mask_ack = media5200_irq_mask, |
81 | }; | 81 | }; |
82 | 82 | ||
83 | void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) | 83 | static void media5200_irq_cascade(struct irq_desc *desc) |
84 | { | 84 | { |
85 | struct irq_chip *chip = irq_desc_get_chip(desc); | 85 | struct irq_chip *chip = irq_desc_get_chip(desc); |
86 | int sub_virq, val; | 86 | int sub_virq, val; |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 63016621aff8..78ac19aefa4d 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c | |||
@@ -191,7 +191,7 @@ static struct irq_chip mpc52xx_gpt_irq_chip = { | |||
191 | .irq_set_type = mpc52xx_gpt_irq_set_type, | 191 | .irq_set_type = mpc52xx_gpt_irq_set_type, |
192 | }; | 192 | }; |
193 | 193 | ||
194 | void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) | 194 | static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc) |
195 | { | 195 | { |
196 | struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc); | 196 | struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc); |
197 | int sub_virq; | 197 | int sub_virq; |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 2944bc84b9d6..4fe2074c88cb 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c | |||
@@ -196,7 +196,7 @@ static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type) | |||
196 | ctrl_reg |= (type << (22 - (l2irq * 2))); | 196 | ctrl_reg |= (type << (22 - (l2irq * 2))); |
197 | out_be32(&intr->ctrl, ctrl_reg); | 197 | out_be32(&intr->ctrl, ctrl_reg); |
198 | 198 | ||
199 | __irq_set_handler_locked(d->irq, handler); | 199 | irq_set_handler_locked(d, handler); |
200 | 200 | ||
201 | return 0; | 201 | return 0; |
202 | } | 202 | } |
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 74861a7fb807..60e89fc9c753 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | |||
@@ -78,7 +78,7 @@ static struct irq_chip pq2ads_pci_ic = { | |||
78 | .irq_disable = pq2ads_pci_mask_irq | 78 | .irq_disable = pq2ads_pci_mask_irq |
79 | }; | 79 | }; |
80 | 80 | ||
81 | static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) | 81 | static void pq2ads_pci_irq_demux(struct irq_desc *desc) |
82 | { | 82 | { |
83 | struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); | 83 | struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); |
84 | u32 stat, mask, pend; | 84 | u32 stat, mask, pend; |
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c index 7bfb9b184dd4..23791de7b688 100644 --- a/arch/powerpc/platforms/85xx/common.c +++ b/arch/powerpc/platforms/85xx/common.c | |||
@@ -49,7 +49,7 @@ int __init mpc85xx_common_publish_devices(void) | |||
49 | return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL); | 49 | return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL); |
50 | } | 50 | } |
51 | #ifdef CONFIG_CPM2 | 51 | #ifdef CONFIG_CPM2 |
52 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 52 | static void cpm2_cascade(struct irq_desc *desc) |
53 | { | 53 | { |
54 | struct irq_chip *chip = irq_desc_get_chip(desc); | 54 | struct irq_chip *chip = irq_desc_get_chip(desc); |
55 | int cascade_irq; | 55 | int cascade_irq; |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index b0753e222086..5ac70de3e48a 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c | |||
@@ -192,8 +192,7 @@ void mpc85xx_cds_fixup_bus(struct pci_bus *bus) | |||
192 | } | 192 | } |
193 | 193 | ||
194 | #ifdef CONFIG_PPC_I8259 | 194 | #ifdef CONFIG_PPC_I8259 |
195 | static void mpc85xx_8259_cascade_handler(unsigned int irq, | 195 | static void mpc85xx_8259_cascade_handler(struct irq_desc *desc) |
196 | struct irq_desc *desc) | ||
197 | { | 196 | { |
198 | unsigned int cascade_irq = i8259_irq(); | 197 | unsigned int cascade_irq = i8259_irq(); |
199 | 198 | ||
@@ -202,7 +201,7 @@ static void mpc85xx_8259_cascade_handler(unsigned int irq, | |||
202 | generic_handle_irq(cascade_irq); | 201 | generic_handle_irq(cascade_irq); |
203 | 202 | ||
204 | /* check for any interrupts from the shared IRQ line */ | 203 | /* check for any interrupts from the shared IRQ line */ |
205 | handle_fasteoi_irq(irq, desc); | 204 | handle_fasteoi_irq(desc); |
206 | } | 205 | } |
207 | 206 | ||
208 | static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id) | 207 | static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id) |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index ffdf02121a7c..f858306dba6a 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c | |||
@@ -46,7 +46,7 @@ | |||
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | #ifdef CONFIG_PPC_I8259 | 48 | #ifdef CONFIG_PPC_I8259 |
49 | static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) | 49 | static void mpc85xx_8259_cascade(struct irq_desc *desc) |
50 | { | 50 | { |
51 | struct irq_chip *chip = irq_desc_get_chip(desc); | 51 | struct irq_chip *chip = irq_desc_get_chip(desc); |
52 | unsigned int cascade_irq = i8259_irq(); | 52 | unsigned int cascade_irq = i8259_irq(); |
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index 55a9682b9529..b02d6a5bb035 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c | |||
@@ -91,9 +91,10 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) | |||
91 | (irq_hw_number_t)i); | 91 | (irq_hw_number_t)i); |
92 | } | 92 | } |
93 | 93 | ||
94 | void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) | 94 | static void socrates_fpga_pic_cascade(struct irq_desc *desc) |
95 | { | 95 | { |
96 | struct irq_chip *chip = irq_desc_get_chip(desc); | 96 | struct irq_chip *chip = irq_desc_get_chip(desc); |
97 | unsigned int irq = irq_desc_get_irq(desc); | ||
97 | unsigned int cascade_irq; | 98 | unsigned int cascade_irq; |
98 | 99 | ||
99 | /* | 100 | /* |
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c index d5b98c0f958a..845defa1fd19 100644 --- a/arch/powerpc/platforms/86xx/pic.c +++ b/arch/powerpc/platforms/86xx/pic.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <asm/i8259.h> | 17 | #include <asm/i8259.h> |
18 | 18 | ||
19 | #ifdef CONFIG_PPC_I8259 | 19 | #ifdef CONFIG_PPC_I8259 |
20 | static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) | 20 | static void mpc86xx_8259_cascade(struct irq_desc *desc) |
21 | { | 21 | { |
22 | struct irq_chip *chip = irq_desc_get_chip(desc); | 22 | struct irq_chip *chip = irq_desc_get_chip(desc); |
23 | unsigned int cascade_irq = i8259_irq(); | 23 | unsigned int cascade_irq = i8259_irq(); |
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index d3037747031d..c289fc77b4ba 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c | |||
@@ -214,7 +214,7 @@ void mpc8xx_restart(char *cmd) | |||
214 | panic("Restart failed\n"); | 214 | panic("Restart failed\n"); |
215 | } | 215 | } |
216 | 216 | ||
217 | static void cpm_cascade(unsigned int irq, struct irq_desc *desc) | 217 | static void cpm_cascade(struct irq_desc *desc) |
218 | { | 218 | { |
219 | struct irq_chip *chip = irq_desc_get_chip(desc); | 219 | struct irq_chip *chip = irq_desc_get_chip(desc); |
220 | int cascade_irq = cpm_get_irq(); | 220 | int cascade_irq = cpm_get_irq(); |
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 306888acb737..e0e68a1c0d3c 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c | |||
@@ -93,7 +93,7 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) | |||
93 | dcr_write(msic->dcr_host, dcr_n, val); | 93 | dcr_write(msic->dcr_host, dcr_n, val); |
94 | } | 94 | } |
95 | 95 | ||
96 | static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) | 96 | static void axon_msi_cascade(struct irq_desc *desc) |
97 | { | 97 | { |
98 | struct irq_chip *chip = irq_desc_get_chip(desc); | 98 | struct irq_chip *chip = irq_desc_get_chip(desc); |
99 | struct axon_msic *msic = irq_desc_get_handler_data(desc); | 99 | struct axon_msic *msic = irq_desc_get_handler_data(desc); |
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index a15f1efc295f..9f609fc8d331 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -99,11 +99,12 @@ static void iic_ioexc_eoi(struct irq_data *d) | |||
99 | { | 99 | { |
100 | } | 100 | } |
101 | 101 | ||
102 | static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) | 102 | static void iic_ioexc_cascade(struct irq_desc *desc) |
103 | { | 103 | { |
104 | struct irq_chip *chip = irq_desc_get_chip(desc); | 104 | struct irq_chip *chip = irq_desc_get_chip(desc); |
105 | struct cbe_iic_regs __iomem *node_iic = | 105 | struct cbe_iic_regs __iomem *node_iic = |
106 | (void __iomem *)irq_desc_get_handler_data(desc); | 106 | (void __iomem *)irq_desc_get_handler_data(desc); |
107 | unsigned int irq = irq_desc_get_irq(desc); | ||
107 | unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; | 108 | unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; |
108 | unsigned long bits, ack; | 109 | unsigned long bits, ack; |
109 | int cascade; | 110 | int cascade; |
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 1f72f4ab6353..9d27de62dc62 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c | |||
@@ -199,7 +199,7 @@ static const struct irq_domain_ops spider_host_ops = { | |||
199 | .xlate = spider_host_xlate, | 199 | .xlate = spider_host_xlate, |
200 | }; | 200 | }; |
201 | 201 | ||
202 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) | 202 | static void spider_irq_cascade(struct irq_desc *desc) |
203 | { | 203 | { |
204 | struct irq_chip *chip = irq_desc_get_chip(desc); | 204 | struct irq_chip *chip = irq_desc_get_chip(desc); |
205 | struct spider_pic *pic = irq_desc_get_handler_data(desc); | 205 | struct spider_pic *pic = irq_desc_get_handler_data(desc); |
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 15ebc4e8a151..987d1b8d68e3 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
@@ -363,7 +363,7 @@ void __init chrp_setup_arch(void) | |||
363 | if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0); | 363 | if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0); |
364 | } | 364 | } |
365 | 365 | ||
366 | static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) | 366 | static void chrp_8259_cascade(struct irq_desc *desc) |
367 | { | 367 | { |
368 | struct irq_chip *chip = irq_desc_get_chip(desc); | 368 | struct irq_chip *chip = irq_desc_get_chip(desc); |
369 | unsigned int cascade_irq = i8259_irq(); | 369 | unsigned int cascade_irq = i8259_irq(); |
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 9dd154d6f89a..9b7975706bfc 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c | |||
@@ -120,8 +120,7 @@ static unsigned int __hlwd_pic_get_irq(struct irq_domain *h) | |||
120 | return irq_linear_revmap(h, irq); | 120 | return irq_linear_revmap(h, irq); |
121 | } | 121 | } |
122 | 122 | ||
123 | static void hlwd_pic_irq_cascade(unsigned int cascade_virq, | 123 | static void hlwd_pic_irq_cascade(struct irq_desc *desc) |
124 | struct irq_desc *desc) | ||
125 | { | 124 | { |
126 | struct irq_chip *chip = irq_desc_get_chip(desc); | 125 | struct irq_chip *chip = irq_desc_get_chip(desc); |
127 | struct irq_domain *irq_domain = irq_desc_get_handler_data(desc); | 126 | struct irq_domain *irq_domain = irq_desc_get_handler_data(desc); |
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c index 1613303177e6..8f65aa3747f5 100644 --- a/arch/powerpc/platforms/embedded6xx/mvme5100.c +++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c | |||
@@ -42,7 +42,7 @@ | |||
42 | static phys_addr_t pci_membase; | 42 | static phys_addr_t pci_membase; |
43 | static u_char *restart; | 43 | static u_char *restart; |
44 | 44 | ||
45 | static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc) | 45 | static void mvme5100_8259_cascade(struct irq_desc *desc) |
46 | { | 46 | { |
47 | struct irq_chip *chip = irq_desc_get_chip(desc); | 47 | struct irq_chip *chip = irq_desc_get_chip(desc); |
48 | unsigned int cascade_irq = i8259_irq(); | 48 | unsigned int cascade_irq = i8259_irq(); |
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c index e66ef1943338..b304a9fe55cc 100644 --- a/arch/powerpc/platforms/pasemi/msi.c +++ b/arch/powerpc/platforms/pasemi/msi.c | |||
@@ -63,6 +63,7 @@ static struct irq_chip mpic_pasemi_msi_chip = { | |||
63 | static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) | 63 | static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) |
64 | { | 64 | { |
65 | struct msi_desc *entry; | 65 | struct msi_desc *entry; |
66 | irq_hw_number_t hwirq; | ||
66 | 67 | ||
67 | pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev); | 68 | pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev); |
68 | 69 | ||
@@ -70,10 +71,10 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) | |||
70 | if (entry->irq == NO_IRQ) | 71 | if (entry->irq == NO_IRQ) |
71 | continue; | 72 | continue; |
72 | 73 | ||
74 | hwirq = virq_to_hw(entry->irq); | ||
73 | irq_set_msi_desc(entry->irq, NULL); | 75 | irq_set_msi_desc(entry->irq, NULL); |
74 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, | ||
75 | virq_to_hw(entry->irq), ALLOC_CHUNK); | ||
76 | irq_dispose_mapping(entry->irq); | 76 | irq_dispose_mapping(entry->irq); |
77 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK); | ||
77 | } | 78 | } |
78 | 79 | ||
79 | return; | 80 | return; |
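This teardown fix, and the matching one in powernv's pnv_teardown_msi_irqs() further down, reorder the loop so the hardware interrupt number is captured before irq_dispose_mapping() destroys the virq-to-hwirq mapping; only then is the number returned to the MSI allocator. A hedged sketch of the corrected ordering (powerpc kernel context, with the bitmap-free call left abstract):

    #include <linux/pci.h>
    #include <linux/msi.h>
    #include <linux/irq.h>
    #include <asm/irq.h>

    static void example_teardown_msi_irqs(struct pci_dev *pdev)
    {
        struct msi_desc *entry;
        irq_hw_number_t hwirq;

        for_each_pci_msi_entry(entry, pdev) {
            if (entry->irq == NO_IRQ)
                continue;

            /* capture before the mapping (and virq_to_hw) goes away */
            hwirq = virq_to_hw(entry->irq);
            irq_set_msi_desc(entry->irq, NULL);
            irq_dispose_mapping(entry->irq);
            /* ...free 'hwirq' back to the controller's MSI bitmap... */
        }
    }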
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 230f3a7cdea4..4296d55e88f3 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c | |||
@@ -487,9 +487,12 @@ int opal_machine_check(struct pt_regs *regs) | |||
487 | * PRD component would have already got notified about this | 487 | * PRD component would have already got notified about this |
488 | * error through other channels. | 488 | * error through other channels. |
489 | * | 489 | * |
490 | * In any case, let us just fall through. We anyway heading | 490 | * If hardware marked this as an unrecoverable MCE, we are |
491 | * down to panic path. | 491 | * going to panic anyway. Even if it didn't, it's not safe to |
492 | * continue at this point, so we should explicitly panic. | ||
492 | */ | 493 | */ |
494 | |||
495 | panic("PowerNV Unrecovered Machine Check"); | ||
493 | return 0; | 496 | return 0; |
494 | } | 497 | } |
495 | 498 | ||
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 2927cd5c8303..414fd1a00fda 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -2049,9 +2049,23 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) | |||
2049 | struct iommu_table *tbl = NULL; | 2049 | struct iommu_table *tbl = NULL; |
2050 | long rc; | 2050 | long rc; |
2051 | 2051 | ||
2052 | /* | ||
2053 | * crashkernel= specifies the kdump kernel's maximum memory at | ||
2054 | * some offset and there is no guarantee that the result is a power | ||
2055 | * of 2, which will cause errors later. | ||
2056 | */ | ||
2057 | const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max()); | ||
2058 | |||
2059 | /* | ||
2060 | * In memory-constrained environments, e.g. a kdump kernel, the | ||
2061 | * DMA window can be larger than available memory, which will | ||
2062 | * cause errors later. | ||
2063 | */ | ||
2064 | const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory); | ||
2065 | |||
2052 | rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, | 2066 | rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, |
2053 | IOMMU_PAGE_SHIFT_4K, | 2067 | IOMMU_PAGE_SHIFT_4K, |
2054 | pe->table_group.tce32_size, | 2068 | window_size, |
2055 | POWERNV_IOMMU_DEFAULT_LEVELS, &tbl); | 2069 | POWERNV_IOMMU_DEFAULT_LEVELS, &tbl); |
2056 | if (rc) { | 2070 | if (rc) { |
2057 | pe_err(pe, "Failed to create 32-bit TCE table, err %ld", | 2071 | pe_err(pe, "Failed to create 32-bit TCE table, err %ld", |
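The two comments added above spell out why the default 32-bit DMA window is clamped: a crashkernel= reservation need not end on a power-of-two boundary, and a kdump kernel may have far less memory than the platform's default tce32_size, so the window is limited to the largest power of two that fits in memory. A small worked sketch of the same arithmetic, with hypothetical values standing in for memory_hotplug_max() and the platform window size:

    /* Sketch: e.g. a kdump kernel seeing 768 MB of usable memory. */
    const u64 max_memory  = __rounddown_pow_of_two(0x30000000ULL); /* 768 MB -> 512 MB */
    const u64 window_size = min((u64)0x80000000ULL, max_memory);   /* min(2 GB, 512 MB) = 512 MB */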
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 9b2480b265c0..f2dd77234240 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -99,6 +99,7 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev) | |||
99 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); | 99 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); |
100 | struct pnv_phb *phb = hose->private_data; | 100 | struct pnv_phb *phb = hose->private_data; |
101 | struct msi_desc *entry; | 101 | struct msi_desc *entry; |
102 | irq_hw_number_t hwirq; | ||
102 | 103 | ||
103 | if (WARN_ON(!phb)) | 104 | if (WARN_ON(!phb)) |
104 | return; | 105 | return; |
@@ -106,10 +107,10 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev) | |||
106 | for_each_pci_msi_entry(entry, pdev) { | 107 | for_each_pci_msi_entry(entry, pdev) { |
107 | if (entry->irq == NO_IRQ) | 108 | if (entry->irq == NO_IRQ) |
108 | continue; | 109 | continue; |
110 | hwirq = virq_to_hw(entry->irq); | ||
109 | irq_set_msi_desc(entry->irq, NULL); | 111 | irq_set_msi_desc(entry->irq, NULL); |
110 | msi_bitmap_free_hwirqs(&phb->msi_bmp, | ||
111 | virq_to_hw(entry->irq) - phb->msi_base, 1); | ||
112 | irq_dispose_mapping(entry->irq); | 112 | irq_dispose_mapping(entry->irq); |
113 | msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1); | ||
113 | } | 114 | } |
114 | } | 115 | } |
115 | #endif /* CONFIG_PCI_MSI */ | 116 | #endif /* CONFIG_PCI_MSI */ |
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index 09787139834d..3db53e8aff92 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c | |||
@@ -194,11 +194,6 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = { | |||
194 | .key = OS_AREA_DB_KEY_RTC_DIFF | 194 | .key = OS_AREA_DB_KEY_RTC_DIFF |
195 | }; | 195 | }; |
196 | 196 | ||
197 | static const struct os_area_db_id os_area_db_id_video_mode = { | ||
198 | .owner = OS_AREA_DB_OWNER_LINUX, | ||
199 | .key = OS_AREA_DB_KEY_VIDEO_MODE | ||
200 | }; | ||
201 | |||
202 | #define SECONDS_FROM_1970_TO_2000 946684800LL | 197 | #define SECONDS_FROM_1970_TO_2000 946684800LL |
203 | 198 | ||
204 | /** | 199 | /** |
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 47d9cebe7159..db17827eb746 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -422,8 +422,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) | |||
422 | 422 | ||
423 | dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); | 423 | dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); |
424 | of_node_put(parent); | 424 | of_node_put(parent); |
425 | if (!dn) | 425 | if (!dn) { |
426 | dlpar_release_drc(drc_index); | ||
426 | return -EINVAL; | 427 | return -EINVAL; |
428 | } | ||
427 | 429 | ||
428 | rc = dlpar_attach_node(dn); | 430 | rc = dlpar_attach_node(dn); |
429 | if (rc) { | 431 | if (rc) { |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 39a74fad3e04..9a83eb71b030 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -111,7 +111,7 @@ static void __init fwnmi_init(void) | |||
111 | fwnmi_active = 1; | 111 | fwnmi_active = 1; |
112 | } | 112 | } |
113 | 113 | ||
114 | static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) | 114 | static void pseries_8259_cascade(struct irq_desc *desc) |
115 | { | 115 | { |
116 | struct irq_chip *chip = irq_desc_get_chip(desc); | 116 | struct irq_chip *chip = irq_desc_get_chip(desc); |
117 | unsigned int cascade_irq = i8259_irq(); | 117 | unsigned int cascade_irq = i8259_irq(); |
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index a11bd1d433ad..9e86074719a9 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c | |||
@@ -155,9 +155,9 @@ static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
155 | 155 | ||
156 | irqd_set_trigger_type(d, flow_type); | 156 | irqd_set_trigger_type(d, flow_type); |
157 | if (flow_type & IRQ_TYPE_LEVEL_LOW) | 157 | if (flow_type & IRQ_TYPE_LEVEL_LOW) |
158 | __irq_set_handler_locked(d->irq, handle_level_irq); | 158 | irq_set_handler_locked(d, handle_level_irq); |
159 | else | 159 | else |
160 | __irq_set_handler_locked(d->irq, handle_edge_irq); | 160 | irq_set_handler_locked(d, handle_edge_irq); |
161 | 161 | ||
162 | /* internal IRQ senses are LEVEL_LOW | 162 | /* internal IRQ senses are LEVEL_LOW |
163 | * EXT IRQ and Port C IRQ senses are programmable | 163 | * EXT IRQ and Port C IRQ senses are programmable |
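Another recurring substitution in this series (also in ipic.c and mpc8xx_pic.c below): .irq_set_type callbacks now switch the flow handler with irq_set_handler_locked() on the struct irq_data they already hold, instead of __irq_set_handler_locked() on the bare virq. A minimal sketch of a set_type callback in the new style; the function name is hypothetical, the helpers are the standard genirq ones:

    #include <linux/irq.h>

    /* Sketch only: pick level vs. edge handling from .irq_set_type. */
    static int my_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
    {
            irqd_set_trigger_type(d, flow_type);
            if (flow_type & IRQ_TYPE_LEVEL_MASK)
                    irq_set_handler_locked(d, handle_level_irq);
            else
                    irq_set_handler_locked(d, handle_edge_irq);
            return 0;
    }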
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 5916da1856a7..48a576aa47b9 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -128,15 +128,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev) | |||
128 | { | 128 | { |
129 | struct msi_desc *entry; | 129 | struct msi_desc *entry; |
130 | struct fsl_msi *msi_data; | 130 | struct fsl_msi *msi_data; |
131 | irq_hw_number_t hwirq; | ||
131 | 132 | ||
132 | for_each_pci_msi_entry(entry, pdev) { | 133 | for_each_pci_msi_entry(entry, pdev) { |
133 | if (entry->irq == NO_IRQ) | 134 | if (entry->irq == NO_IRQ) |
134 | continue; | 135 | continue; |
136 | hwirq = virq_to_hw(entry->irq); | ||
135 | msi_data = irq_get_chip_data(entry->irq); | 137 | msi_data = irq_get_chip_data(entry->irq); |
136 | irq_set_msi_desc(entry->irq, NULL); | 138 | irq_set_msi_desc(entry->irq, NULL); |
137 | msi_bitmap_free_hwirqs(&msi_data->bitmap, | ||
138 | virq_to_hw(entry->irq), 1); | ||
139 | irq_dispose_mapping(entry->irq); | 139 | irq_dispose_mapping(entry->irq); |
140 | msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); | ||
140 | } | 141 | } |
141 | 142 | ||
142 | return; | 143 | return; |
diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c index 2bcb78bb3a15..d57b77573068 100644 --- a/arch/powerpc/sysdev/ge/ge_pic.c +++ b/arch/powerpc/sysdev/ge/ge_pic.c | |||
@@ -91,7 +91,7 @@ static int gef_pic_cascade_irq; | |||
91 | * should be masked out. | 91 | * should be masked out. |
92 | */ | 92 | */ |
93 | 93 | ||
94 | void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) | 94 | static void gef_pic_cascade(struct irq_desc *desc) |
95 | { | 95 | { |
96 | struct irq_chip *chip = irq_desc_get_chip(desc); | 96 | struct irq_chip *chip = irq_desc_get_chip(desc); |
97 | unsigned int cascade_irq; | 97 | unsigned int cascade_irq; |
diff --git a/arch/powerpc/sysdev/ge/ge_pic.h b/arch/powerpc/sysdev/ge/ge_pic.h index 908dbd9826b6..5bf7e4b81e36 100644 --- a/arch/powerpc/sysdev/ge/ge_pic.h +++ b/arch/powerpc/sysdev/ge/ge_pic.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef __GEF_PIC_H__ | 1 | #ifndef __GEF_PIC_H__ |
2 | #define __GEF_PIC_H__ | 2 | #define __GEF_PIC_H__ |
3 | 3 | ||
4 | |||
5 | void gef_pic_cascade(unsigned int, struct irq_desc *); | ||
6 | unsigned int gef_pic_get_irq(void); | 4 | unsigned int gef_pic_get_irq(void); |
7 | void gef_pic_init(struct device_node *); | 5 | void gef_pic_init(struct device_node *); |
8 | 6 | ||
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 6b2b68914810..b1297ab1599b 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c | |||
@@ -624,10 +624,10 @@ static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
624 | 624 | ||
625 | irqd_set_trigger_type(d, flow_type); | 625 | irqd_set_trigger_type(d, flow_type); |
626 | if (flow_type & IRQ_TYPE_LEVEL_LOW) { | 626 | if (flow_type & IRQ_TYPE_LEVEL_LOW) { |
627 | __irq_set_handler_locked(d->irq, handle_level_irq); | 627 | irq_set_handler_locked(d, handle_level_irq); |
628 | d->chip = &ipic_level_irq_chip; | 628 | d->chip = &ipic_level_irq_chip; |
629 | } else { | 629 | } else { |
630 | __irq_set_handler_locked(d->irq, handle_edge_irq); | 630 | irq_set_handler_locked(d, handle_edge_irq); |
631 | d->chip = &ipic_edge_irq_chip; | 631 | d->chip = &ipic_edge_irq_chip; |
632 | } | 632 | } |
633 | 633 | ||
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index d93a78be4346..9a423975853a 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c | |||
@@ -55,7 +55,7 @@ static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
55 | unsigned int siel = in_be32(&siu_reg->sc_siel); | 55 | unsigned int siel = in_be32(&siu_reg->sc_siel); |
56 | siel |= mpc8xx_irqd_to_bit(d); | 56 | siel |= mpc8xx_irqd_to_bit(d); |
57 | out_be32(&siu_reg->sc_siel, siel); | 57 | out_be32(&siu_reg->sc_siel, siel); |
58 | __irq_set_handler_locked(d->irq, handle_edge_irq); | 58 | irq_set_handler_locked(d, handle_edge_irq); |
59 | } | 59 | } |
60 | return 0; | 60 | return 0; |
61 | } | 61 | } |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 97a8ae8f94dd..537e5db85a06 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -1181,7 +1181,7 @@ static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct, | |||
1181 | } | 1181 | } |
1182 | 1182 | ||
1183 | /* IRQ handler for a secondary MPIC cascaded from another IRQ controller */ | 1183 | /* IRQ handler for a secondary MPIC cascaded from another IRQ controller */ |
1184 | static void mpic_cascade(unsigned int irq, struct irq_desc *desc) | 1184 | static void mpic_cascade(struct irq_desc *desc) |
1185 | { | 1185 | { |
1186 | struct irq_chip *chip = irq_desc_get_chip(desc); | 1186 | struct irq_chip *chip = irq_desc_get_chip(desc); |
1187 | struct mpic *mpic = irq_desc_get_handler_data(desc); | 1187 | struct mpic *mpic = irq_desc_get_handler_data(desc); |
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index 70fbd5694a8b..2cbc7e29b85f 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c | |||
@@ -107,15 +107,16 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq) | |||
107 | static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) | 107 | static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) |
108 | { | 108 | { |
109 | struct msi_desc *entry; | 109 | struct msi_desc *entry; |
110 | irq_hw_number_t hwirq; | ||
110 | 111 | ||
111 | for_each_pci_msi_entry(entry, pdev) { | 112 | for_each_pci_msi_entry(entry, pdev) { |
112 | if (entry->irq == NO_IRQ) | 113 | if (entry->irq == NO_IRQ) |
113 | continue; | 114 | continue; |
114 | 115 | ||
116 | hwirq = virq_to_hw(entry->irq); | ||
115 | irq_set_msi_desc(entry->irq, NULL); | 117 | irq_set_msi_desc(entry->irq, NULL); |
116 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, | ||
117 | virq_to_hw(entry->irq), 1); | ||
118 | irq_dispose_mapping(entry->irq); | 118 | irq_dispose_mapping(entry->irq); |
119 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1); | ||
119 | } | 120 | } |
120 | 121 | ||
121 | return; | 122 | return; |
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c index 24d0470c1698..8fb806135043 100644 --- a/arch/powerpc/sysdev/ppc4xx_msi.c +++ b/arch/powerpc/sysdev/ppc4xx_msi.c | |||
@@ -124,16 +124,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev) | |||
124 | { | 124 | { |
125 | struct msi_desc *entry; | 125 | struct msi_desc *entry; |
126 | struct ppc4xx_msi *msi_data = &ppc4xx_msi; | 126 | struct ppc4xx_msi *msi_data = &ppc4xx_msi; |
127 | irq_hw_number_t hwirq; | ||
127 | 128 | ||
128 | dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n"); | 129 | dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n"); |
129 | 130 | ||
130 | for_each_pci_msi_entry(entry, dev) { | 131 | for_each_pci_msi_entry(entry, dev) { |
131 | if (entry->irq == NO_IRQ) | 132 | if (entry->irq == NO_IRQ) |
132 | continue; | 133 | continue; |
134 | hwirq = virq_to_hw(entry->irq); | ||
133 | irq_set_msi_desc(entry->irq, NULL); | 135 | irq_set_msi_desc(entry->irq, NULL); |
134 | msi_bitmap_free_hwirqs(&msi_data->bitmap, | ||
135 | virq_to_hw(entry->irq), 1); | ||
136 | irq_dispose_mapping(entry->irq); | 136 | irq_dispose_mapping(entry->irq); |
137 | msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); | ||
137 | } | 138 | } |
138 | } | 139 | } |
139 | 140 | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index 47b352e4bc74..fbcc1f855a7f 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c | |||
@@ -311,8 +311,8 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) | |||
311 | } | 311 | } |
312 | 312 | ||
313 | void __init qe_ic_init(struct device_node *node, unsigned int flags, | 313 | void __init qe_ic_init(struct device_node *node, unsigned int flags, |
314 | void (*low_handler)(unsigned int irq, struct irq_desc *desc), | 314 | void (*low_handler)(struct irq_desc *desc), |
315 | void (*high_handler)(unsigned int irq, struct irq_desc *desc)) | 315 | void (*high_handler)(struct irq_desc *desc)) |
316 | { | 316 | { |
317 | struct qe_ic *qe_ic; | 317 | struct qe_ic *qe_ic; |
318 | struct resource res; | 318 | struct resource res; |
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 57b54476e747..379de955aae3 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c | |||
@@ -428,7 +428,7 @@ void __init tsi108_pci_int_init(struct device_node *node) | |||
428 | init_pci_source(); | 428 | init_pci_source(); |
429 | } | 429 | } |
430 | 430 | ||
431 | void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) | 431 | void tsi108_irq_cascade(struct irq_desc *desc) |
432 | { | 432 | { |
433 | struct irq_chip *chip = irq_desc_get_chip(desc); | 433 | struct irq_chip *chip = irq_desc_get_chip(desc); |
434 | unsigned int cascade_irq = get_pci_source(); | 434 | unsigned int cascade_irq = get_pci_source(); |
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index d77345338671..6893d8f236df 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c | |||
@@ -194,7 +194,7 @@ static const struct irq_domain_ops uic_host_ops = { | |||
194 | .xlate = irq_domain_xlate_twocell, | 194 | .xlate = irq_domain_xlate_twocell, |
195 | }; | 195 | }; |
196 | 196 | ||
197 | void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) | 197 | static void uic_irq_cascade(struct irq_desc *desc) |
198 | { | 198 | { |
199 | struct irq_chip *chip = irq_desc_get_chip(desc); | 199 | struct irq_chip *chip = irq_desc_get_chip(desc); |
200 | struct irq_data *idata = irq_desc_get_irq_data(desc); | 200 | struct irq_data *idata = irq_desc_get_irq_data(desc); |
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c index 11ac964d5175..27c936c080a6 100644 --- a/arch/powerpc/sysdev/xics/ics-opal.c +++ b/arch/powerpc/sysdev/xics/ics-opal.c | |||
@@ -54,7 +54,7 @@ static void ics_opal_unmask_irq(struct irq_data *d) | |||
54 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | 54 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) |
55 | return; | 55 | return; |
56 | 56 | ||
57 | server = xics_get_irq_server(d->irq, d->affinity, 0); | 57 | server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0); |
58 | server = ics_opal_mangle_server(server); | 58 | server = ics_opal_mangle_server(server); |
59 | 59 | ||
60 | rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY); | 60 | rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY); |
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index d1c625c4cc5a..3854dd41558d 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c | |||
@@ -47,7 +47,7 @@ static void ics_rtas_unmask_irq(struct irq_data *d) | |||
47 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | 47 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) |
48 | return; | 48 | return; |
49 | 49 | ||
50 | server = xics_get_irq_server(d->irq, d->affinity, 0); | 50 | server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0); |
51 | 51 | ||
52 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, | 52 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, |
53 | DEFAULT_PRIORITY); | 53 | DEFAULT_PRIORITY); |
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 43b8b275bc5c..0f52d7955796 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c | |||
@@ -222,7 +222,7 @@ int xilinx_intc_get_irq(void) | |||
222 | /* | 222 | /* |
223 | * Support code for cascading to 8259 interrupt controllers | 223 | * Support code for cascading to 8259 interrupt controllers |
224 | */ | 224 | */ |
225 | static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) | 225 | static void xilinx_i8259_cascade(struct irq_desc *desc) |
226 | { | 226 | { |
227 | struct irq_chip *chip = irq_desc_get_chip(desc); | 227 | struct irq_chip *chip = irq_desc_get_chip(desc); |
228 | unsigned int cascade_irq = i8259_irq(); | 228 | unsigned int cascade_irq = i8259_irq(); |
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index d4788111c161..fac6ac9790fa 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile | |||
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o | |||
10 | 10 | ||
11 | KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2 | 11 | KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2 |
12 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING | 12 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING |
13 | KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks | 13 | KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float |
14 | KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) | 14 | KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) |
15 | KBUILD_CFLAGS += $(call cc-option,-ffreestanding) | 15 | KBUILD_CFLAGS += $(call cc-option,-ffreestanding) |
16 | 16 | ||
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 0c98f1508542..ed7da281df66 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig | |||
@@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m | |||
381 | CONFIG_SCSI_DEBUG=m | 381 | CONFIG_SCSI_DEBUG=m |
382 | CONFIG_ZFCP=y | 382 | CONFIG_ZFCP=y |
383 | CONFIG_SCSI_VIRTIO=m | 383 | CONFIG_SCSI_VIRTIO=m |
384 | CONFIG_SCSI_DH=m | 384 | CONFIG_SCSI_DH=y |
385 | CONFIG_SCSI_DH_RDAC=m | 385 | CONFIG_SCSI_DH_RDAC=m |
386 | CONFIG_SCSI_DH_HP_SW=m | 386 | CONFIG_SCSI_DH_HP_SW=m |
387 | CONFIG_SCSI_DH_EMC=m | 387 | CONFIG_SCSI_DH_EMC=m |
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 82083e1fbdc4..9858b14cde1e 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig | |||
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m | |||
377 | CONFIG_SCSI_DEBUG=m | 377 | CONFIG_SCSI_DEBUG=m |
378 | CONFIG_ZFCP=y | 378 | CONFIG_ZFCP=y |
379 | CONFIG_SCSI_VIRTIO=m | 379 | CONFIG_SCSI_VIRTIO=m |
380 | CONFIG_SCSI_DH=m | 380 | CONFIG_SCSI_DH=y |
381 | CONFIG_SCSI_DH_RDAC=m | 381 | CONFIG_SCSI_DH_RDAC=m |
382 | CONFIG_SCSI_DH_HP_SW=m | 382 | CONFIG_SCSI_DH_HP_SW=m |
383 | CONFIG_SCSI_DH_EMC=m | 383 | CONFIG_SCSI_DH_EMC=m |
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index c05c9e0821e3..7f14f80717d4 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m | |||
377 | CONFIG_SCSI_DEBUG=m | 377 | CONFIG_SCSI_DEBUG=m |
378 | CONFIG_ZFCP=y | 378 | CONFIG_ZFCP=y |
379 | CONFIG_SCSI_VIRTIO=m | 379 | CONFIG_SCSI_VIRTIO=m |
380 | CONFIG_SCSI_DH=m | 380 | CONFIG_SCSI_DH=y |
381 | CONFIG_SCSI_DH_RDAC=m | 381 | CONFIG_SCSI_DH_RDAC=m |
382 | CONFIG_SCSI_DH_HP_SW=m | 382 | CONFIG_SCSI_DH_HP_SW=m |
383 | CONFIG_SCSI_DH_EMC=m | 383 | CONFIG_SCSI_DH_EMC=m |
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 1b0184a0f7f2..92805d604173 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig | |||
@@ -1,7 +1,6 @@ | |||
1 | # CONFIG_SWAP is not set | 1 | # CONFIG_SWAP is not set |
2 | CONFIG_NO_HZ=y | 2 | CONFIG_NO_HZ=y |
3 | CONFIG_HIGH_RES_TIMERS=y | 3 | CONFIG_HIGH_RES_TIMERS=y |
4 | CONFIG_RCU_FAST_NO_HZ=y | ||
5 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
6 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | 5 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y |
7 | # CONFIG_COMPAT_BRK is not set | 6 | # CONFIG_COMPAT_BRK is not set |
@@ -54,10 +53,6 @@ CONFIG_RAW_DRIVER=y | |||
54 | # CONFIG_MONWRITER is not set | 53 | # CONFIG_MONWRITER is not set |
55 | # CONFIG_S390_VMUR is not set | 54 | # CONFIG_S390_VMUR is not set |
56 | # CONFIG_HID is not set | 55 | # CONFIG_HID is not set |
57 | CONFIG_MEMSTICK=y | ||
58 | CONFIG_MEMSTICK_DEBUG=y | ||
59 | CONFIG_MEMSTICK_UNSAFE_RESUME=y | ||
60 | CONFIG_MSPRO_BLOCK=y | ||
61 | # CONFIG_IOMMU_SUPPORT is not set | 56 | # CONFIG_IOMMU_SUPPORT is not set |
62 | CONFIG_EXT2_FS=y | 57 | CONFIG_EXT2_FS=y |
63 | CONFIG_EXT3_FS=y | 58 | CONFIG_EXT3_FS=y |
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 5ad26dd94d77..9043d2e1e2ae 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild | |||
@@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h | |||
6 | generic-y += mm-arch-hooks.h | 6 | generic-y += mm-arch-hooks.h |
7 | generic-y += preempt.h | 7 | generic-y += preempt.h |
8 | generic-y += trace_clock.h | 8 | generic-y += trace_clock.h |
9 | generic-y += word-at-a-time.h | ||
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3d012e071647..8ced426091e1 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -35,6 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | #define KVM_NR_IRQCHIPS 1 | 36 | #define KVM_NR_IRQCHIPS 1 |
37 | #define KVM_IRQCHIP_NUM_PINS 4096 | 37 | #define KVM_IRQCHIP_NUM_PINS 4096 |
38 | #define KVM_HALT_POLL_NS_DEFAULT 0 | ||
38 | 39 | ||
39 | #define SIGP_CTRL_C 0x80 | 40 | #define SIGP_CTRL_C 0x80 |
40 | #define SIGP_CTRL_SCN_MASK 0x3f | 41 | #define SIGP_CTRL_SCN_MASK 0x3f |
@@ -210,6 +211,7 @@ struct kvm_vcpu_stat { | |||
210 | u32 exit_validity; | 211 | u32 exit_validity; |
211 | u32 exit_instruction; | 212 | u32 exit_instruction; |
212 | u32 halt_successful_poll; | 213 | u32 halt_successful_poll; |
214 | u32 halt_attempted_poll; | ||
213 | u32 halt_wakeup; | 215 | u32 halt_wakeup; |
214 | u32 instruction_lctl; | 216 | u32 instruction_lctl; |
215 | u32 instruction_lctlg; | 217 | u32 instruction_lctlg; |
diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h index 2a0efc63b9e5..dc19ee0c92aa 100644 --- a/arch/s390/include/asm/numa.h +++ b/arch/s390/include/asm/numa.h | |||
@@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn); | |||
19 | int __node_distance(int a, int b); | 19 | int __node_distance(int a, int b); |
20 | void numa_update_cpu_topology(void); | 20 | void numa_update_cpu_topology(void); |
21 | 21 | ||
22 | extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; | 22 | extern cpumask_t node_to_cpumask_map[MAX_NUMNODES]; |
23 | extern int numa_debug_enabled; | 23 | extern int numa_debug_enabled; |
24 | 24 | ||
25 | #else | 25 | #else |
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 27ebde643933..94fc55fc72ce 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h | |||
@@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu) | |||
68 | #define cpumask_of_node cpumask_of_node | 68 | #define cpumask_of_node cpumask_of_node |
69 | static inline const struct cpumask *cpumask_of_node(int node) | 69 | static inline const struct cpumask *cpumask_of_node(int node) |
70 | { | 70 | { |
71 | return node_to_cpumask_map[node]; | 71 | return &node_to_cpumask_map[node]; |
72 | } | 72 | } |
73 | 73 | ||
74 | /* | 74 | /* |
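The numa.h and topology.h hunks go together: node_to_cpumask_map becomes a plain array of cpumask_t rather than cpumask_var_t, presumably so it needs no runtime allocation (cpumask_var_t degenerates to a pointer when CONFIG_CPUMASK_OFFSTACK is set), and the accessor therefore has to return the address of the array element. A one-line sketch of the accessor as it reads after both hunks:

    /* Each entry is now a struct cpumask object, so return its address. */
    return &node_to_cpumask_map[node];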
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 525cef73b085..02613bad8bbb 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h | |||
@@ -8,28 +8,8 @@ | |||
8 | 8 | ||
9 | #include <uapi/asm/unistd.h> | 9 | #include <uapi/asm/unistd.h> |
10 | 10 | ||
11 | |||
12 | #define __IGNORE_time | 11 | #define __IGNORE_time |
13 | 12 | ||
14 | /* Ignore system calls that are also reachable via sys_socketcall */ | ||
15 | #define __IGNORE_recvmmsg | ||
16 | #define __IGNORE_sendmmsg | ||
17 | #define __IGNORE_socket | ||
18 | #define __IGNORE_socketpair | ||
19 | #define __IGNORE_bind | ||
20 | #define __IGNORE_connect | ||
21 | #define __IGNORE_listen | ||
22 | #define __IGNORE_accept4 | ||
23 | #define __IGNORE_getsockopt | ||
24 | #define __IGNORE_setsockopt | ||
25 | #define __IGNORE_getsockname | ||
26 | #define __IGNORE_getpeername | ||
27 | #define __IGNORE_sendto | ||
28 | #define __IGNORE_sendmsg | ||
29 | #define __IGNORE_recvfrom | ||
30 | #define __IGNORE_recvmsg | ||
31 | #define __IGNORE_shutdown | ||
32 | |||
33 | #define __ARCH_WANT_OLD_READDIR | 13 | #define __ARCH_WANT_OLD_READDIR |
34 | #define __ARCH_WANT_SYS_ALARM | 14 | #define __ARCH_WANT_SYS_ALARM |
35 | #define __ARCH_WANT_SYS_GETHOSTNAME | 15 | #define __ARCH_WANT_SYS_GETHOSTNAME |
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 59d2bb4e2d0c..a848adba1504 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h | |||
@@ -290,7 +290,26 @@ | |||
290 | #define __NR_s390_pci_mmio_write 352 | 290 | #define __NR_s390_pci_mmio_write 352 |
291 | #define __NR_s390_pci_mmio_read 353 | 291 | #define __NR_s390_pci_mmio_read 353 |
292 | #define __NR_execveat 354 | 292 | #define __NR_execveat 354 |
293 | #define NR_syscalls 355 | 293 | #define __NR_userfaultfd 355 |
294 | #define __NR_membarrier 356 | ||
295 | #define __NR_recvmmsg 357 | ||
296 | #define __NR_sendmmsg 358 | ||
297 | #define __NR_socket 359 | ||
298 | #define __NR_socketpair 360 | ||
299 | #define __NR_bind 361 | ||
300 | #define __NR_connect 362 | ||
301 | #define __NR_listen 363 | ||
302 | #define __NR_accept4 364 | ||
303 | #define __NR_getsockopt 365 | ||
304 | #define __NR_setsockopt 366 | ||
305 | #define __NR_getsockname 367 | ||
306 | #define __NR_getpeername 368 | ||
307 | #define __NR_sendto 369 | ||
308 | #define __NR_sendmsg 370 | ||
309 | #define __NR_recvfrom 371 | ||
310 | #define __NR_recvmsg 372 | ||
311 | #define __NR_shutdown 373 | ||
312 | #define NR_syscalls 374 | ||
294 | 313 | ||
295 | /* | 314 | /* |
296 | * There are some system calls that are not present on 64 bit, some | 315 | * There are some system calls that are not present on 64 bit, some |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 48c9af7a7683..3aeeb1b562c0 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -176,6 +176,7 @@ int main(void) | |||
176 | DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); | 176 | DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); |
177 | DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); | 177 | DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); |
178 | DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); | 178 | DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); |
179 | DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset)); | ||
179 | DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); | 180 | DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); |
180 | DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); | 181 | DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); |
181 | DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); | 182 | DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); |
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index eb4664238613..e0f9d270b30f 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -48,6 +48,19 @@ typedef struct | |||
48 | struct ucontext32 uc; | 48 | struct ucontext32 uc; |
49 | } rt_sigframe32; | 49 | } rt_sigframe32; |
50 | 50 | ||
51 | static inline void sigset_to_sigset32(unsigned long *set64, | ||
52 | compat_sigset_word *set32) | ||
53 | { | ||
54 | set32[0] = (compat_sigset_word) set64[0]; | ||
55 | set32[1] = (compat_sigset_word)(set64[0] >> 32); | ||
56 | } | ||
57 | |||
58 | static inline void sigset32_to_sigset(compat_sigset_word *set32, | ||
59 | unsigned long *set64) | ||
60 | { | ||
61 | set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32); | ||
62 | } | ||
63 | |||
51 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) | 64 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) |
52 | { | 65 | { |
53 | int err; | 66 | int err; |
@@ -281,10 +294,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn) | |||
281 | { | 294 | { |
282 | struct pt_regs *regs = task_pt_regs(current); | 295 | struct pt_regs *regs = task_pt_regs(current); |
283 | sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; | 296 | sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; |
297 | compat_sigset_t cset; | ||
284 | sigset_t set; | 298 | sigset_t set; |
285 | 299 | ||
286 | if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) | 300 | if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) |
287 | goto badframe; | 301 | goto badframe; |
302 | sigset32_to_sigset(cset.sig, set.sig); | ||
288 | set_current_blocked(&set); | 303 | set_current_blocked(&set); |
289 | save_fpu_regs(); | 304 | save_fpu_regs(); |
290 | if (restore_sigregs32(regs, &frame->sregs)) | 305 | if (restore_sigregs32(regs, &frame->sregs)) |
@@ -302,10 +317,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn) | |||
302 | { | 317 | { |
303 | struct pt_regs *regs = task_pt_regs(current); | 318 | struct pt_regs *regs = task_pt_regs(current); |
304 | rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; | 319 | rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; |
320 | compat_sigset_t cset; | ||
305 | sigset_t set; | 321 | sigset_t set; |
306 | 322 | ||
307 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | 323 | if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset))) |
308 | goto badframe; | 324 | goto badframe; |
325 | sigset32_to_sigset(cset.sig, set.sig); | ||
309 | set_current_blocked(&set); | 326 | set_current_blocked(&set); |
310 | if (compat_restore_altstack(&frame->uc.uc_stack)) | 327 | if (compat_restore_altstack(&frame->uc.uc_stack)) |
311 | goto badframe; | 328 | goto badframe; |
@@ -377,7 +394,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set, | |||
377 | return -EFAULT; | 394 | return -EFAULT; |
378 | 395 | ||
379 | /* Create struct sigcontext32 on the signal stack */ | 396 | /* Create struct sigcontext32 on the signal stack */ |
380 | memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32); | 397 | sigset_to_sigset32(set->sig, sc.oldmask); |
381 | sc.sregs = (__u32)(unsigned long __force) &frame->sregs; | 398 | sc.sregs = (__u32)(unsigned long __force) &frame->sregs; |
382 | if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc))) | 399 | if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc))) |
383 | return -EFAULT; | 400 | return -EFAULT; |
@@ -438,6 +455,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set, | |||
438 | static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, | 455 | static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, |
439 | struct pt_regs *regs) | 456 | struct pt_regs *regs) |
440 | { | 457 | { |
458 | compat_sigset_t cset; | ||
441 | rt_sigframe32 __user *frame; | 459 | rt_sigframe32 __user *frame; |
442 | unsigned long restorer; | 460 | unsigned long restorer; |
443 | size_t frame_size; | 461 | size_t frame_size; |
@@ -485,11 +503,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, | |||
485 | store_sigregs(); | 503 | store_sigregs(); |
486 | 504 | ||
487 | /* Create ucontext on the signal stack. */ | 505 | /* Create ucontext on the signal stack. */ |
506 | sigset_to_sigset32(set->sig, cset.sig); | ||
488 | if (__put_user(uc_flags, &frame->uc.uc_flags) || | 507 | if (__put_user(uc_flags, &frame->uc.uc_flags) || |
489 | __put_user(0, &frame->uc.uc_link) || | 508 | __put_user(0, &frame->uc.uc_link) || |
490 | __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) || | 509 | __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) || |
491 | save_sigregs32(regs, &frame->uc.uc_mcontext) || | 510 | save_sigregs32(regs, &frame->uc.uc_mcontext) || |
492 | __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) || | 511 | __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) || |
493 | save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext)) | 512 | save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext)) |
494 | return -EFAULT; | 513 | return -EFAULT; |
495 | 514 | ||
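The compat_signal.c hunks replace raw copies of the 64-bit signal mask with explicit conversion helpers. On big-endian s390 the most significant 32 bits of a 64-bit word come first in memory, so a verbatim copy into the two-word compat_sigset_t would put signals 33-64 into the word userspace reads as signals 1-32; the new sigset_to_sigset32()/sigset32_to_sigset() helpers split and reassemble the mask by value instead. A standalone sketch of the same conversion, with the kernel types simplified to fixed-width integers:

    #include <stdint.h>
    #include <assert.h>

    /* Sketch: split a 64-bit signal mask into two 32-bit compat words and back,
     * independent of host byte order (mirrors the new helpers above). */
    static void sigset_to_sigset32(uint64_t set64, uint32_t set32[2])
    {
            set32[0] = (uint32_t)set64;         /* low word: signals 1..32   */
            set32[1] = (uint32_t)(set64 >> 32); /* high word: signals 33..64 */
    }

    static uint64_t sigset32_to_sigset(const uint32_t set32[2])
    {
            return (uint64_t)set32[0] | ((uint64_t)set32[1] << 32);
    }

    int main(void)
    {
            uint32_t w[2];
            uint64_t mask = (1ULL << 0) | (1ULL << 40); /* two arbitrary signal bits */

            sigset_to_sigset32(mask, w);
            assert(sigset32_to_sigset(w) == mask);
            return 0;
    }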
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index f8498dde67b1..09f194052df3 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c | |||
@@ -52,15 +52,13 @@ | |||
52 | * the regular system call wrappers. | 52 | * the regular system call wrappers. |
53 | */ | 53 | */ |
54 | #define COMPAT_SYSCALL_WRAPx(x, name, ...) \ | 54 | #define COMPAT_SYSCALL_WRAPx(x, name, ...) \ |
55 | asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ | 55 | asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ |
56 | asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\ | 56 | asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\ |
57 | asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \ | 57 | asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \ |
58 | { \ | 58 | { \ |
59 | return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \ | 59 | return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \ |
60 | } | 60 | } |
61 | 61 | ||
62 | COMPAT_SYSCALL_WRAP1(exit, int, error_code); | ||
63 | COMPAT_SYSCALL_WRAP1(close, unsigned int, fd); | ||
64 | COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode); | 62 | COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode); |
65 | COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname); | 63 | COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname); |
66 | COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname); | 64 | COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname); |
@@ -68,23 +66,16 @@ COMPAT_SYSCALL_WRAP1(chdir, const char __user *, filename); | |||
68 | COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev); | 66 | COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev); |
69 | COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode); | 67 | COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode); |
70 | COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name); | 68 | COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name); |
71 | COMPAT_SYSCALL_WRAP1(alarm, unsigned int, seconds); | ||
72 | COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode); | 69 | COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode); |
73 | COMPAT_SYSCALL_WRAP1(nice, int, increment); | ||
74 | COMPAT_SYSCALL_WRAP2(kill, int, pid, int, sig); | ||
75 | COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname); | 70 | COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname); |
76 | COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode); | 71 | COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode); |
77 | COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname); | 72 | COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname); |
78 | COMPAT_SYSCALL_WRAP1(dup, unsigned int, fildes); | ||
79 | COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes); | 73 | COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes); |
80 | COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk); | 74 | COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk); |
81 | COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler); | 75 | COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler); |
82 | COMPAT_SYSCALL_WRAP1(acct, const char __user *, name); | 76 | COMPAT_SYSCALL_WRAP1(acct, const char __user *, name); |
83 | COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags); | 77 | COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags); |
84 | COMPAT_SYSCALL_WRAP2(setpgid, pid_t, pid, pid_t, pgid); | ||
85 | COMPAT_SYSCALL_WRAP1(umask, int, mask); | ||
86 | COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename); | 78 | COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename); |
87 | COMPAT_SYSCALL_WRAP2(dup2, unsigned int, oldfd, unsigned int, newfd); | ||
88 | COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask); | 79 | COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask); |
89 | COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len); | 80 | COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len); |
90 | COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new); | 81 | COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new); |
@@ -93,37 +84,23 @@ COMPAT_SYSCALL_WRAP1(uselib, const char __user *, library); | |||
93 | COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags); | 84 | COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags); |
94 | COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg); | 85 | COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg); |
95 | COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len); | 86 | COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len); |
96 | COMPAT_SYSCALL_WRAP2(fchmod, unsigned int, fd, umode_t, mode); | ||
97 | COMPAT_SYSCALL_WRAP2(getpriority, int, which, int, who); | ||
98 | COMPAT_SYSCALL_WRAP3(setpriority, int, which, int, who, int, niceval); | ||
99 | COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len); | 87 | COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len); |
100 | COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile); | 88 | COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile); |
101 | COMPAT_SYSCALL_WRAP1(fsync, unsigned int, fd); | ||
102 | COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len); | 89 | COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len); |
103 | COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name); | 90 | COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name); |
104 | COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot); | 91 | COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot); |
105 | COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs); | 92 | COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs); |
106 | COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags); | 93 | COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags); |
107 | COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr); | 94 | COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr); |
108 | COMPAT_SYSCALL_WRAP1(getpgid, pid_t, pid); | ||
109 | COMPAT_SYSCALL_WRAP1(fchdir, unsigned int, fd); | ||
110 | COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data); | 95 | COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data); |
111 | COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2); | 96 | COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2); |
112 | COMPAT_SYSCALL_WRAP1(s390_personality, unsigned int, personality); | ||
113 | COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence); | 97 | COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence); |
114 | COMPAT_SYSCALL_WRAP2(flock, unsigned int, fd, unsigned int, cmd); | ||
115 | COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags); | 98 | COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags); |
116 | COMPAT_SYSCALL_WRAP1(getsid, pid_t, pid); | ||
117 | COMPAT_SYSCALL_WRAP1(fdatasync, unsigned int, fd); | ||
118 | COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len); | 99 | COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len); |
119 | COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len); | 100 | COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len); |
120 | COMPAT_SYSCALL_WRAP1(mlockall, int, flags); | ||
121 | COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param); | 101 | COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param); |
122 | COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param); | 102 | COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param); |
123 | COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param); | 103 | COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param); |
124 | COMPAT_SYSCALL_WRAP1(sched_getscheduler, pid_t, pid); | ||
125 | COMPAT_SYSCALL_WRAP1(sched_get_priority_max, int, policy); | ||
126 | COMPAT_SYSCALL_WRAP1(sched_get_priority_min, int, policy); | ||
127 | COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr); | 104 | COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr); |
128 | COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout); | 105 | COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout); |
129 | COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5); | 106 | COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5); |
@@ -131,20 +108,11 @@ COMPAT_SYSCALL_WRAP2(getcwd, char __user *, buf, unsigned long, size); | |||
131 | COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr); | 108 | COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr); |
132 | COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data); | 109 | COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data); |
133 | COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group); | 110 | COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group); |
134 | COMPAT_SYSCALL_WRAP2(setreuid, uid_t, ruid, uid_t, euid); | ||
135 | COMPAT_SYSCALL_WRAP2(setregid, gid_t, rgid, gid_t, egid); | ||
136 | COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist); | 111 | COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist); |
137 | COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist); | 112 | COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist); |
138 | COMPAT_SYSCALL_WRAP3(fchown, unsigned int, fd, uid_t, user, gid_t, group); | ||
139 | COMPAT_SYSCALL_WRAP3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid); | ||
140 | COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid); | 113 | COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid); |
141 | COMPAT_SYSCALL_WRAP3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid); | ||
142 | COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid); | 114 | COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid); |
143 | COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group); | 115 | COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group); |
144 | COMPAT_SYSCALL_WRAP1(setuid, uid_t, uid); | ||
145 | COMPAT_SYSCALL_WRAP1(setgid, gid_t, gid); | ||
146 | COMPAT_SYSCALL_WRAP1(setfsuid, uid_t, uid); | ||
147 | COMPAT_SYSCALL_WRAP1(setfsgid, gid_t, gid); | ||
148 | COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old); | 116 | COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old); |
149 | COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec); | 117 | COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec); |
150 | COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior); | 118 | COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior); |
@@ -161,23 +129,16 @@ COMPAT_SYSCALL_WRAP3(flistxattr, int, fd, char __user *, list, size_t, size); | |||
161 | COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name); | 129 | COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name); |
162 | COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name); | 130 | COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name); |
163 | COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name); | 131 | COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name); |
164 | COMPAT_SYSCALL_WRAP1(exit_group, int, error_code); | ||
165 | COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr); | 132 | COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr); |
166 | COMPAT_SYSCALL_WRAP1(epoll_create, int, size); | ||
167 | COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event); | 133 | COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event); |
168 | COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout); | 134 | COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout); |
169 | COMPAT_SYSCALL_WRAP1(timer_getoverrun, timer_t, timer_id); | ||
170 | COMPAT_SYSCALL_WRAP1(timer_delete, compat_timer_t, compat_timer_id); | ||
171 | COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx); | 135 | COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx); |
172 | COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result); | 136 | COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result); |
173 | COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name); | 137 | COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name); |
174 | COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id); | 138 | COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id); |
175 | COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id); | 139 | COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id); |
176 | COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags); | 140 | COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags); |
177 | COMPAT_SYSCALL_WRAP3(ioprio_set, int, which, int, who, int, ioprio); | ||
178 | COMPAT_SYSCALL_WRAP2(ioprio_get, int, which, int, who); | ||
179 | COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask); | 141 | COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask); |
180 | COMPAT_SYSCALL_WRAP2(inotify_rm_watch, int, fd, __s32, wd); | ||
181 | COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode); | 142 | COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode); |
182 | COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev); | 143 | COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev); |
183 | COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag); | 144 | COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag); |
@@ -192,23 +153,11 @@ COMPAT_SYSCALL_WRAP1(unshare, unsigned long, unshare_flags); | |||
192 | COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); | 153 | COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); |
193 | COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags); | 154 | COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags); |
194 | COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache); | 155 | COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache); |
195 | COMPAT_SYSCALL_WRAP1(eventfd, unsigned int, count); | ||
196 | COMPAT_SYSCALL_WRAP2(timerfd_create, int, clockid, int, flags); | ||
197 | COMPAT_SYSCALL_WRAP2(eventfd2, unsigned int, count, int, flags); | ||
198 | COMPAT_SYSCALL_WRAP1(inotify_init1, int, flags); | ||
199 | COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags); | 156 | COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags); |
200 | COMPAT_SYSCALL_WRAP3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags); | ||
201 | COMPAT_SYSCALL_WRAP1(epoll_create1, int, flags); | ||
202 | COMPAT_SYSCALL_WRAP2(tkill, int, pid, int, sig); | ||
203 | COMPAT_SYSCALL_WRAP3(tgkill, int, tgid, int, pid, int, sig); | ||
204 | COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags); | 157 | COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags); |
205 | COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls); | 158 | COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls); |
206 | COMPAT_SYSCALL_WRAP2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags); | ||
207 | COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim); | 159 | COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim); |
208 | COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag); | 160 | COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag); |
209 | COMPAT_SYSCALL_WRAP1(syncfs, int, fd); | ||
210 | COMPAT_SYSCALL_WRAP2(setns, int, fd, int, nstype); | ||
211 | COMPAT_SYSCALL_WRAP2(s390_runtime_instr, int, command, int, signum); | ||
212 | COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2); | 161 | COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2); |
213 | COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags); | 162 | COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags); |
214 | COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags); | 163 | COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags); |
@@ -220,3 +169,10 @@ COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, fla | |||
220 | COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size); | 169 | COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size); |
221 | COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length); | 170 | COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length); |
222 | COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length); | 171 | COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length); |
172 | COMPAT_SYSCALL_WRAP4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec); | ||
173 | COMPAT_SYSCALL_WRAP3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen); | ||
174 | COMPAT_SYSCALL_WRAP3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen); | ||
175 | COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags); | ||
176 | COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); | ||
177 | COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); | ||
178 | COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); | ||
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 247b7aae4c6d..582fe44ab07c 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -733,6 +733,14 @@ ENTRY(psw_idle) | |||
733 | stg %r3,__SF_EMPTY(%r15) | 733 | stg %r3,__SF_EMPTY(%r15) |
734 | larl %r1,.Lpsw_idle_lpsw+4 | 734 | larl %r1,.Lpsw_idle_lpsw+4 |
735 | stg %r1,__SF_EMPTY+8(%r15) | 735 | stg %r1,__SF_EMPTY+8(%r15) |
736 | #ifdef CONFIG_SMP | ||
737 | larl %r1,smp_cpu_mtid | ||
738 | llgf %r1,0(%r1) | ||
739 | ltgr %r1,%r1 | ||
740 | jz .Lpsw_idle_stcctm | ||
741 | .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) | ||
742 | .Lpsw_idle_stcctm: | ||
743 | #endif | ||
736 | STCK __CLOCK_IDLE_ENTER(%r2) | 744 | STCK __CLOCK_IDLE_ENTER(%r2) |
737 | stpt __TIMER_IDLE_ENTER(%r2) | 745 | stpt __TIMER_IDLE_ENTER(%r2) |
738 | .Lpsw_idle_lpsw: | 746 | .Lpsw_idle_lpsw: |
@@ -1159,7 +1167,27 @@ cleanup_critical: | |||
1159 | jhe 1f | 1167 | jhe 1f |
1160 | mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) | 1168 | mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) |
1161 | mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) | 1169 | mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) |
1162 | 1: # account system time going idle | 1170 | 1: # calculate idle cycles |
1171 | #ifdef CONFIG_SMP | ||
1172 | clg %r9,BASED(.Lcleanup_idle_insn) | ||
1173 | jl 3f | ||
1174 | larl %r1,smp_cpu_mtid | ||
1175 | llgf %r1,0(%r1) | ||
1176 | ltgr %r1,%r1 | ||
1177 | jz 3f | ||
1178 | .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) | ||
1179 | larl %r3,mt_cycles | ||
1180 | ag %r3,__LC_PERCPU_OFFSET | ||
1181 | la %r4,__SF_EMPTY+16(%r15) | ||
1182 | 2: lg %r0,0(%r3) | ||
1183 | slg %r0,0(%r4) | ||
1184 | alg %r0,64(%r4) | ||
1185 | stg %r0,0(%r3) | ||
1186 | la %r3,8(%r3) | ||
1187 | la %r4,8(%r4) | ||
1188 | brct %r1,2b | ||
1189 | #endif | ||
1190 | 3: # account system time going idle | ||
1163 | lg %r9,__LC_STEAL_TIMER | 1191 | lg %r9,__LC_STEAL_TIMER |
1164 | alg %r9,__CLOCK_IDLE_ENTER(%r2) | 1192 | alg %r9,__CLOCK_IDLE_ENTER(%r2) |
1165 | slg %r9,__LC_LAST_UPDATE_CLOCK | 1193 | slg %r9,__LC_LAST_UPDATE_CLOCK |
@@ -1191,6 +1219,7 @@ cleanup_critical: | |||
1191 | clg %r9,BASED(.Lcleanup_save_fpu_fpc_end) | 1219 | clg %r9,BASED(.Lcleanup_save_fpu_fpc_end) |
1192 | jhe 1f | 1220 | jhe 1f |
1193 | lg %r2,__LC_CURRENT | 1221 | lg %r2,__LC_CURRENT |
1222 | aghi %r2,__TASK_thread | ||
1194 | 0: # Store floating-point controls | 1223 | 0: # Store floating-point controls |
1195 | stfpc __THREAD_FPU_fpc(%r2) | 1224 | stfpc __THREAD_FPU_fpc(%r2) |
1196 | 1: # Load register save area and check if VX is active | 1225 | 1: # Load register save area and check if VX is active |
@@ -1252,6 +1281,7 @@ cleanup_critical: | |||
1252 | clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl) | 1281 | clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl) |
1253 | jhe 6f | 1282 | jhe 6f |
1254 | lg %r4,__LC_CURRENT | 1283 | lg %r4,__LC_CURRENT |
1284 | aghi %r4,__TASK_thread | ||
1255 | lfpc __THREAD_FPU_fpc(%r4) | 1285 | lfpc __THREAD_FPU_fpc(%r4) |
1256 | tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? | 1286 | tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? |
1257 | lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area | 1287 | lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area |
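Note on the entry.S hunks above: the .insn rsy,0xeb0000000017 encoding appears to be the STCCTM instruction (store CPU counter multiple), emitted raw for assemblers that lack the mnemonic. The idle path samples the MT counter set into the stack frame before and after idle, and the cleanup path folds the difference into the per-CPU mt_cycles array. A rough C sketch of what that cleanup loop computes; the function and parameter names are illustrative, not taken from the source:

#include <linux/types.h>

/* Sketch only: accumulate per-thread idle cycles.
 * enter[] was sampled before loading the idle PSW, exit[] after wakeup;
 * nr corresponds to smp_cpu_mtid counters saved on the idle stack frame.
 */
static void accumulate_idle_mt_cycles(u64 *mt_cycles, const u64 *enter,
				      const u64 *exit, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		mt_cycles[i] += exit[i] - enter[i];	/* cycles spent idle per thread */
}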
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 56fdad479115..a9563409c36e 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c | |||
@@ -157,10 +157,14 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc) | |||
157 | 157 | ||
158 | cpuhw = &get_cpu_var(cpu_hw_events); | 158 | cpuhw = &get_cpu_var(cpu_hw_events); |
159 | 159 | ||
160 | /* check authorization for cpu counter sets */ | 160 | /* Check authorization for cpu counter sets. |
161 | * If the particular CPU counter set is not authorized, | ||
162 | * return with -ENOENT in order to fall back to other | ||
163 | * PMUs that might suffice the event request. | ||
164 | */ | ||
161 | ctrs_state = cpumf_state_ctl[hwc->config_base]; | 165 | ctrs_state = cpumf_state_ctl[hwc->config_base]; |
162 | if (!(ctrs_state & cpuhw->info.auth_ctl)) | 166 | if (!(ctrs_state & cpuhw->info.auth_ctl)) |
163 | err = -EPERM; | 167 | err = -ENOENT; |
164 | 168 | ||
165 | put_cpu_var(cpu_hw_events); | 169 | put_cpu_var(cpu_hw_events); |
166 | return err; | 170 | return err; |
@@ -536,7 +540,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags) | |||
536 | */ | 540 | */ |
537 | if (!(cpuhw->flags & PERF_EVENT_TXN)) | 541 | if (!(cpuhw->flags & PERF_EVENT_TXN)) |
538 | if (validate_ctr_auth(&event->hw)) | 542 | if (validate_ctr_auth(&event->hw)) |
539 | return -EPERM; | 543 | return -ENOENT; |
540 | 544 | ||
541 | ctr_set_enable(&cpuhw->state, event->hw.config_base); | 545 | ctr_set_enable(&cpuhw->state, event->hw.config_base); |
542 | event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; | 546 | event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; |
@@ -611,7 +615,7 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu) | |||
611 | state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); | 615 | state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); |
612 | state >>= CPUMF_LCCTL_ENABLE_SHIFT; | 616 | state >>= CPUMF_LCCTL_ENABLE_SHIFT; |
613 | if ((state & cpuhw->info.auth_ctl) != state) | 617 | if ((state & cpuhw->info.auth_ctl) != state) |
614 | return -EPERM; | 618 | return -ENOENT; |
615 | 619 | ||
616 | cpuhw->flags &= ~PERF_EVENT_TXN; | 620 | cpuhw->flags &= ~PERF_EVENT_TXN; |
617 | perf_pmu_enable(pmu); | 621 | perf_pmu_enable(pmu); |
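Note on the perf_cpum_cf.c hunks: switching the authorization failure from -EPERM to -ENOENT matters because the perf core treats -ENOENT from a PMU's event-init/add path as "not handled here, try the next PMU", while other errors abort event creation. A minimal sketch of the check; the helper name and parameters are illustrative:

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: why -ENOENT is the right error for an unauthorized counter set.
 * Returning -ENOENT lets the perf core fall back to another PMU that may
 * still service the event; -EPERM would fail the whole request.
 */
static int check_ctr_auth(u64 wanted_sets, u64 authorized_sets)
{
	if (!(wanted_sets & authorized_sets))
		return -ENOENT;	/* counter set not available on this PMU */
	return 0;
}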
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index ca6294645dd3..2d6b6e81f812 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S | |||
@@ -30,6 +30,9 @@ ENTRY(swsusp_arch_suspend) | |||
30 | aghi %r15,-STACK_FRAME_OVERHEAD | 30 | aghi %r15,-STACK_FRAME_OVERHEAD |
31 | stg %r1,__SF_BACKCHAIN(%r15) | 31 | stg %r1,__SF_BACKCHAIN(%r15) |
32 | 32 | ||
33 | /* Store FPU registers */ | ||
34 | brasl %r14,save_fpu_regs | ||
35 | |||
33 | /* Deactivate DAT */ | 36 | /* Deactivate DAT */ |
34 | stnsm __SF_EMPTY(%r15),0xfb | 37 | stnsm __SF_EMPTY(%r15),0xfb |
35 | 38 | ||
@@ -47,23 +50,6 @@ ENTRY(swsusp_arch_suspend) | |||
47 | 50 | ||
48 | /* Store registers */ | 51 | /* Store registers */ |
49 | mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ | 52 | mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ |
50 | stfpc 0x31c(%r1) /* store fpu control */ | ||
51 | std 0,0x200(%r1) /* store f0 */ | ||
52 | std 1,0x208(%r1) /* store f1 */ | ||
53 | std 2,0x210(%r1) /* store f2 */ | ||
54 | std 3,0x218(%r1) /* store f3 */ | ||
55 | std 4,0x220(%r1) /* store f4 */ | ||
56 | std 5,0x228(%r1) /* store f5 */ | ||
57 | std 6,0x230(%r1) /* store f6 */ | ||
58 | std 7,0x238(%r1) /* store f7 */ | ||
59 | std 8,0x240(%r1) /* store f8 */ | ||
60 | std 9,0x248(%r1) /* store f9 */ | ||
61 | std 10,0x250(%r1) /* store f10 */ | ||
62 | std 11,0x258(%r1) /* store f11 */ | ||
63 | std 12,0x260(%r1) /* store f12 */ | ||
64 | std 13,0x268(%r1) /* store f13 */ | ||
65 | std 14,0x270(%r1) /* store f14 */ | ||
66 | std 15,0x278(%r1) /* store f15 */ | ||
67 | stam %a0,%a15,0x340(%r1) /* store access registers */ | 53 | stam %a0,%a15,0x340(%r1) /* store access registers */ |
68 | stctg %c0,%c15,0x380(%r1) /* store control registers */ | 54 | stctg %c0,%c15,0x380(%r1) /* store control registers */ |
69 | stmg %r0,%r15,0x280(%r1) /* store general registers */ | 55 | stmg %r0,%r15,0x280(%r1) /* store general registers */ |
@@ -249,24 +235,6 @@ restore_registers: | |||
249 | lctlg %c0,%c15,0x380(%r13) /* load control registers */ | 235 | lctlg %c0,%c15,0x380(%r13) /* load control registers */ |
250 | lam %a0,%a15,0x340(%r13) /* load access registers */ | 236 | lam %a0,%a15,0x340(%r13) /* load access registers */ |
251 | 237 | ||
252 | lfpc 0x31c(%r13) /* load fpu control */ | ||
253 | ld 0,0x200(%r13) /* load f0 */ | ||
254 | ld 1,0x208(%r13) /* load f1 */ | ||
255 | ld 2,0x210(%r13) /* load f2 */ | ||
256 | ld 3,0x218(%r13) /* load f3 */ | ||
257 | ld 4,0x220(%r13) /* load f4 */ | ||
258 | ld 5,0x228(%r13) /* load f5 */ | ||
259 | ld 6,0x230(%r13) /* load f6 */ | ||
260 | ld 7,0x238(%r13) /* load f7 */ | ||
261 | ld 8,0x240(%r13) /* load f8 */ | ||
262 | ld 9,0x248(%r13) /* load f9 */ | ||
263 | ld 10,0x250(%r13) /* load f10 */ | ||
264 | ld 11,0x258(%r13) /* load f11 */ | ||
265 | ld 12,0x260(%r13) /* load f12 */ | ||
266 | ld 13,0x268(%r13) /* load f13 */ | ||
267 | ld 14,0x270(%r13) /* load f14 */ | ||
268 | ld 15,0x278(%r13) /* load f15 */ | ||
269 | |||
270 | /* Load old stack */ | 238 | /* Load old stack */ |
271 | lg %r15,0x2f8(%r13) | 239 | lg %r15,0x2f8(%r13) |
272 | 240 | ||
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index f3f4a137aef6..8c56929c8d82 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
@@ -9,12 +9,12 @@ | |||
9 | #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall) | 9 | #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall) |
10 | 10 | ||
11 | NI_SYSCALL /* 0 */ | 11 | NI_SYSCALL /* 0 */ |
12 | SYSCALL(sys_exit,compat_sys_exit) | 12 | SYSCALL(sys_exit,sys_exit) |
13 | SYSCALL(sys_fork,sys_fork) | 13 | SYSCALL(sys_fork,sys_fork) |
14 | SYSCALL(sys_read,compat_sys_s390_read) | 14 | SYSCALL(sys_read,compat_sys_s390_read) |
15 | SYSCALL(sys_write,compat_sys_s390_write) | 15 | SYSCALL(sys_write,compat_sys_s390_write) |
16 | SYSCALL(sys_open,compat_sys_open) /* 5 */ | 16 | SYSCALL(sys_open,compat_sys_open) /* 5 */ |
17 | SYSCALL(sys_close,compat_sys_close) | 17 | SYSCALL(sys_close,sys_close) |
18 | SYSCALL(sys_restart_syscall,sys_restart_syscall) | 18 | SYSCALL(sys_restart_syscall,sys_restart_syscall) |
19 | SYSCALL(sys_creat,compat_sys_creat) | 19 | SYSCALL(sys_creat,compat_sys_creat) |
20 | SYSCALL(sys_link,compat_sys_link) | 20 | SYSCALL(sys_link,compat_sys_link) |
@@ -35,21 +35,21 @@ SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/ | |||
35 | SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ | 35 | SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ |
36 | SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ | 36 | SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ |
37 | SYSCALL(sys_ptrace,compat_sys_ptrace) | 37 | SYSCALL(sys_ptrace,compat_sys_ptrace) |
38 | SYSCALL(sys_alarm,compat_sys_alarm) | 38 | SYSCALL(sys_alarm,sys_alarm) |
39 | NI_SYSCALL /* old fstat syscall */ | 39 | NI_SYSCALL /* old fstat syscall */ |
40 | SYSCALL(sys_pause,sys_pause) | 40 | SYSCALL(sys_pause,sys_pause) |
41 | SYSCALL(sys_utime,compat_sys_utime) /* 30 */ | 41 | SYSCALL(sys_utime,compat_sys_utime) /* 30 */ |
42 | NI_SYSCALL /* old stty syscall */ | 42 | NI_SYSCALL /* old stty syscall */ |
43 | NI_SYSCALL /* old gtty syscall */ | 43 | NI_SYSCALL /* old gtty syscall */ |
44 | SYSCALL(sys_access,compat_sys_access) | 44 | SYSCALL(sys_access,compat_sys_access) |
45 | SYSCALL(sys_nice,compat_sys_nice) | 45 | SYSCALL(sys_nice,sys_nice) |
46 | NI_SYSCALL /* 35 old ftime syscall */ | 46 | NI_SYSCALL /* 35 old ftime syscall */ |
47 | SYSCALL(sys_sync,sys_sync) | 47 | SYSCALL(sys_sync,sys_sync) |
48 | SYSCALL(sys_kill,compat_sys_kill) | 48 | SYSCALL(sys_kill,sys_kill) |
49 | SYSCALL(sys_rename,compat_sys_rename) | 49 | SYSCALL(sys_rename,compat_sys_rename) |
50 | SYSCALL(sys_mkdir,compat_sys_mkdir) | 50 | SYSCALL(sys_mkdir,compat_sys_mkdir) |
51 | SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */ | 51 | SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */ |
52 | SYSCALL(sys_dup,compat_sys_dup) | 52 | SYSCALL(sys_dup,sys_dup) |
53 | SYSCALL(sys_pipe,compat_sys_pipe) | 53 | SYSCALL(sys_pipe,compat_sys_pipe) |
54 | SYSCALL(sys_times,compat_sys_times) | 54 | SYSCALL(sys_times,compat_sys_times) |
55 | NI_SYSCALL /* old prof syscall */ | 55 | NI_SYSCALL /* old prof syscall */ |
@@ -65,13 +65,13 @@ NI_SYSCALL /* old lock syscall */ | |||
65 | SYSCALL(sys_ioctl,compat_sys_ioctl) | 65 | SYSCALL(sys_ioctl,compat_sys_ioctl) |
66 | SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */ | 66 | SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */ |
67 | NI_SYSCALL /* intel mpx syscall */ | 67 | NI_SYSCALL /* intel mpx syscall */ |
68 | SYSCALL(sys_setpgid,compat_sys_setpgid) | 68 | SYSCALL(sys_setpgid,sys_setpgid) |
69 | NI_SYSCALL /* old ulimit syscall */ | 69 | NI_SYSCALL /* old ulimit syscall */ |
70 | NI_SYSCALL /* old uname syscall */ | 70 | NI_SYSCALL /* old uname syscall */ |
71 | SYSCALL(sys_umask,compat_sys_umask) /* 60 */ | 71 | SYSCALL(sys_umask,sys_umask) /* 60 */ |
72 | SYSCALL(sys_chroot,compat_sys_chroot) | 72 | SYSCALL(sys_chroot,compat_sys_chroot) |
73 | SYSCALL(sys_ustat,compat_sys_ustat) | 73 | SYSCALL(sys_ustat,compat_sys_ustat) |
74 | SYSCALL(sys_dup2,compat_sys_dup2) | 74 | SYSCALL(sys_dup2,sys_dup2) |
75 | SYSCALL(sys_getppid,sys_getppid) | 75 | SYSCALL(sys_getppid,sys_getppid) |
76 | SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */ | 76 | SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */ |
77 | SYSCALL(sys_setsid,sys_setsid) | 77 | SYSCALL(sys_setsid,sys_setsid) |
@@ -102,10 +102,10 @@ SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */ | |||
102 | SYSCALL(sys_munmap,compat_sys_munmap) | 102 | SYSCALL(sys_munmap,compat_sys_munmap) |
103 | SYSCALL(sys_truncate,compat_sys_truncate) | 103 | SYSCALL(sys_truncate,compat_sys_truncate) |
104 | SYSCALL(sys_ftruncate,compat_sys_ftruncate) | 104 | SYSCALL(sys_ftruncate,compat_sys_ftruncate) |
105 | SYSCALL(sys_fchmod,compat_sys_fchmod) | 105 | SYSCALL(sys_fchmod,sys_fchmod) |
106 | SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/ | 106 | SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/ |
107 | SYSCALL(sys_getpriority,compat_sys_getpriority) | 107 | SYSCALL(sys_getpriority,sys_getpriority) |
108 | SYSCALL(sys_setpriority,compat_sys_setpriority) | 108 | SYSCALL(sys_setpriority,sys_setpriority) |
109 | NI_SYSCALL /* old profil syscall */ | 109 | NI_SYSCALL /* old profil syscall */ |
110 | SYSCALL(sys_statfs,compat_sys_statfs) | 110 | SYSCALL(sys_statfs,compat_sys_statfs) |
111 | SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */ | 111 | SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */ |
@@ -126,7 +126,7 @@ SYSCALL(sys_wait4,compat_sys_wait4) | |||
126 | SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */ | 126 | SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */ |
127 | SYSCALL(sys_sysinfo,compat_sys_sysinfo) | 127 | SYSCALL(sys_sysinfo,compat_sys_sysinfo) |
128 | SYSCALL(sys_s390_ipc,compat_sys_s390_ipc) | 128 | SYSCALL(sys_s390_ipc,compat_sys_s390_ipc) |
129 | SYSCALL(sys_fsync,compat_sys_fsync) | 129 | SYSCALL(sys_fsync,sys_fsync) |
130 | SYSCALL(sys_sigreturn,compat_sys_sigreturn) | 130 | SYSCALL(sys_sigreturn,compat_sys_sigreturn) |
131 | SYSCALL(sys_clone,compat_sys_clone) /* 120 */ | 131 | SYSCALL(sys_clone,compat_sys_clone) /* 120 */ |
132 | SYSCALL(sys_setdomainname,compat_sys_setdomainname) | 132 | SYSCALL(sys_setdomainname,compat_sys_setdomainname) |
@@ -140,35 +140,35 @@ SYSCALL(sys_init_module,compat_sys_init_module) | |||
140 | SYSCALL(sys_delete_module,compat_sys_delete_module) | 140 | SYSCALL(sys_delete_module,compat_sys_delete_module) |
141 | NI_SYSCALL /* 130: old get_kernel_syms */ | 141 | NI_SYSCALL /* 130: old get_kernel_syms */ |
142 | SYSCALL(sys_quotactl,compat_sys_quotactl) | 142 | SYSCALL(sys_quotactl,compat_sys_quotactl) |
143 | SYSCALL(sys_getpgid,compat_sys_getpgid) | 143 | SYSCALL(sys_getpgid,sys_getpgid) |
144 | SYSCALL(sys_fchdir,compat_sys_fchdir) | 144 | SYSCALL(sys_fchdir,sys_fchdir) |
145 | SYSCALL(sys_bdflush,compat_sys_bdflush) | 145 | SYSCALL(sys_bdflush,compat_sys_bdflush) |
146 | SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */ | 146 | SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */ |
147 | SYSCALL(sys_s390_personality,compat_sys_s390_personality) | 147 | SYSCALL(sys_s390_personality,sys_s390_personality) |
148 | NI_SYSCALL /* for afs_syscall */ | 148 | NI_SYSCALL /* for afs_syscall */ |
149 | SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */ | 149 | SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */ |
150 | SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */ | 150 | SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */ |
151 | SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */ | 151 | SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */ |
152 | SYSCALL(sys_getdents,compat_sys_getdents) | 152 | SYSCALL(sys_getdents,compat_sys_getdents) |
153 | SYSCALL(sys_select,compat_sys_select) | 153 | SYSCALL(sys_select,compat_sys_select) |
154 | SYSCALL(sys_flock,compat_sys_flock) | 154 | SYSCALL(sys_flock,sys_flock) |
155 | SYSCALL(sys_msync,compat_sys_msync) | 155 | SYSCALL(sys_msync,compat_sys_msync) |
156 | SYSCALL(sys_readv,compat_sys_readv) /* 145 */ | 156 | SYSCALL(sys_readv,compat_sys_readv) /* 145 */ |
157 | SYSCALL(sys_writev,compat_sys_writev) | 157 | SYSCALL(sys_writev,compat_sys_writev) |
158 | SYSCALL(sys_getsid,compat_sys_getsid) | 158 | SYSCALL(sys_getsid,sys_getsid) |
159 | SYSCALL(sys_fdatasync,compat_sys_fdatasync) | 159 | SYSCALL(sys_fdatasync,sys_fdatasync) |
160 | SYSCALL(sys_sysctl,compat_sys_sysctl) | 160 | SYSCALL(sys_sysctl,compat_sys_sysctl) |
161 | SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */ | 161 | SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */ |
162 | SYSCALL(sys_munlock,compat_sys_munlock) | 162 | SYSCALL(sys_munlock,compat_sys_munlock) |
163 | SYSCALL(sys_mlockall,compat_sys_mlockall) | 163 | SYSCALL(sys_mlockall,sys_mlockall) |
164 | SYSCALL(sys_munlockall,sys_munlockall) | 164 | SYSCALL(sys_munlockall,sys_munlockall) |
165 | SYSCALL(sys_sched_setparam,compat_sys_sched_setparam) | 165 | SYSCALL(sys_sched_setparam,compat_sys_sched_setparam) |
166 | SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */ | 166 | SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */ |
167 | SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler) | 167 | SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler) |
168 | SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler) | 168 | SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler) |
169 | SYSCALL(sys_sched_yield,sys_sched_yield) | 169 | SYSCALL(sys_sched_yield,sys_sched_yield) |
170 | SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max) | 170 | SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max) |
171 | SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */ | 171 | SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min) /* 160 */ |
172 | SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) | 172 | SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) |
173 | SYSCALL(sys_nanosleep,compat_sys_nanosleep) | 173 | SYSCALL(sys_nanosleep,compat_sys_nanosleep) |
174 | SYSCALL(sys_mremap,compat_sys_mremap) | 174 | SYSCALL(sys_mremap,compat_sys_mremap) |
@@ -211,20 +211,20 @@ SYSCALL(sys_getuid,sys_getuid) | |||
211 | SYSCALL(sys_getgid,sys_getgid) /* 200 */ | 211 | SYSCALL(sys_getgid,sys_getgid) /* 200 */ |
212 | SYSCALL(sys_geteuid,sys_geteuid) | 212 | SYSCALL(sys_geteuid,sys_geteuid) |
213 | SYSCALL(sys_getegid,sys_getegid) | 213 | SYSCALL(sys_getegid,sys_getegid) |
214 | SYSCALL(sys_setreuid,compat_sys_setreuid) | 214 | SYSCALL(sys_setreuid,sys_setreuid) |
215 | SYSCALL(sys_setregid,compat_sys_setregid) | 215 | SYSCALL(sys_setregid,sys_setregid) |
216 | SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */ | 216 | SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */ |
217 | SYSCALL(sys_setgroups,compat_sys_setgroups) | 217 | SYSCALL(sys_setgroups,compat_sys_setgroups) |
218 | SYSCALL(sys_fchown,compat_sys_fchown) | 218 | SYSCALL(sys_fchown,sys_fchown) |
219 | SYSCALL(sys_setresuid,compat_sys_setresuid) | 219 | SYSCALL(sys_setresuid,sys_setresuid) |
220 | SYSCALL(sys_getresuid,compat_sys_getresuid) | 220 | SYSCALL(sys_getresuid,compat_sys_getresuid) |
221 | SYSCALL(sys_setresgid,compat_sys_setresgid) /* 210 */ | 221 | SYSCALL(sys_setresgid,sys_setresgid) /* 210 */ |
222 | SYSCALL(sys_getresgid,compat_sys_getresgid) | 222 | SYSCALL(sys_getresgid,compat_sys_getresgid) |
223 | SYSCALL(sys_chown,compat_sys_chown) | 223 | SYSCALL(sys_chown,compat_sys_chown) |
224 | SYSCALL(sys_setuid,compat_sys_setuid) | 224 | SYSCALL(sys_setuid,sys_setuid) |
225 | SYSCALL(sys_setgid,compat_sys_setgid) | 225 | SYSCALL(sys_setgid,sys_setgid) |
226 | SYSCALL(sys_setfsuid,compat_sys_setfsuid) /* 215 */ | 226 | SYSCALL(sys_setfsuid,sys_setfsuid) /* 215 */ |
227 | SYSCALL(sys_setfsgid,compat_sys_setfsgid) | 227 | SYSCALL(sys_setfsgid,sys_setfsgid) |
228 | SYSCALL(sys_pivot_root,compat_sys_pivot_root) | 228 | SYSCALL(sys_pivot_root,compat_sys_pivot_root) |
229 | SYSCALL(sys_mincore,compat_sys_mincore) | 229 | SYSCALL(sys_mincore,compat_sys_mincore) |
230 | SYSCALL(sys_madvise,compat_sys_madvise) | 230 | SYSCALL(sys_madvise,compat_sys_madvise) |
@@ -245,19 +245,19 @@ SYSCALL(sys_removexattr,compat_sys_removexattr) | |||
245 | SYSCALL(sys_lremovexattr,compat_sys_lremovexattr) | 245 | SYSCALL(sys_lremovexattr,compat_sys_lremovexattr) |
246 | SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */ | 246 | SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */ |
247 | SYSCALL(sys_gettid,sys_gettid) | 247 | SYSCALL(sys_gettid,sys_gettid) |
248 | SYSCALL(sys_tkill,compat_sys_tkill) | 248 | SYSCALL(sys_tkill,sys_tkill) |
249 | SYSCALL(sys_futex,compat_sys_futex) | 249 | SYSCALL(sys_futex,compat_sys_futex) |
250 | SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity) | 250 | SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity) |
251 | SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */ | 251 | SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */ |
252 | SYSCALL(sys_tgkill,compat_sys_tgkill) | 252 | SYSCALL(sys_tgkill,sys_tgkill) |
253 | NI_SYSCALL /* reserved for TUX */ | 253 | NI_SYSCALL /* reserved for TUX */ |
254 | SYSCALL(sys_io_setup,compat_sys_io_setup) | 254 | SYSCALL(sys_io_setup,compat_sys_io_setup) |
255 | SYSCALL(sys_io_destroy,compat_sys_io_destroy) | 255 | SYSCALL(sys_io_destroy,compat_sys_io_destroy) |
256 | SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */ | 256 | SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */ |
257 | SYSCALL(sys_io_submit,compat_sys_io_submit) | 257 | SYSCALL(sys_io_submit,compat_sys_io_submit) |
258 | SYSCALL(sys_io_cancel,compat_sys_io_cancel) | 258 | SYSCALL(sys_io_cancel,compat_sys_io_cancel) |
259 | SYSCALL(sys_exit_group,compat_sys_exit_group) | 259 | SYSCALL(sys_exit_group,sys_exit_group) |
260 | SYSCALL(sys_epoll_create,compat_sys_epoll_create) | 260 | SYSCALL(sys_epoll_create,sys_epoll_create) |
261 | SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */ | 261 | SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */ |
262 | SYSCALL(sys_epoll_wait,compat_sys_epoll_wait) | 262 | SYSCALL(sys_epoll_wait,compat_sys_epoll_wait) |
263 | SYSCALL(sys_set_tid_address,compat_sys_set_tid_address) | 263 | SYSCALL(sys_set_tid_address,compat_sys_set_tid_address) |
@@ -265,8 +265,8 @@ SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64) | |||
265 | SYSCALL(sys_timer_create,compat_sys_timer_create) | 265 | SYSCALL(sys_timer_create,compat_sys_timer_create) |
266 | SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */ | 266 | SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */ |
267 | SYSCALL(sys_timer_gettime,compat_sys_timer_gettime) | 267 | SYSCALL(sys_timer_gettime,compat_sys_timer_gettime) |
268 | SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun) | 268 | SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun) |
269 | SYSCALL(sys_timer_delete,compat_sys_timer_delete) | 269 | SYSCALL(sys_timer_delete,sys_timer_delete) |
270 | SYSCALL(sys_clock_settime,compat_sys_clock_settime) | 270 | SYSCALL(sys_clock_settime,compat_sys_clock_settime) |
271 | SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */ | 271 | SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */ |
272 | SYSCALL(sys_clock_getres,compat_sys_clock_getres) | 272 | SYSCALL(sys_clock_getres,compat_sys_clock_getres) |
@@ -290,11 +290,11 @@ SYSCALL(sys_add_key,compat_sys_add_key) | |||
290 | SYSCALL(sys_request_key,compat_sys_request_key) | 290 | SYSCALL(sys_request_key,compat_sys_request_key) |
291 | SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */ | 291 | SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */ |
292 | SYSCALL(sys_waitid,compat_sys_waitid) | 292 | SYSCALL(sys_waitid,compat_sys_waitid) |
293 | SYSCALL(sys_ioprio_set,compat_sys_ioprio_set) | 293 | SYSCALL(sys_ioprio_set,sys_ioprio_set) |
294 | SYSCALL(sys_ioprio_get,compat_sys_ioprio_get) | 294 | SYSCALL(sys_ioprio_get,sys_ioprio_get) |
295 | SYSCALL(sys_inotify_init,sys_inotify_init) | 295 | SYSCALL(sys_inotify_init,sys_inotify_init) |
296 | SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ | 296 | SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ |
297 | SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch) | 297 | SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch) |
298 | SYSCALL(sys_migrate_pages,compat_sys_migrate_pages) | 298 | SYSCALL(sys_migrate_pages,compat_sys_migrate_pages) |
299 | SYSCALL(sys_openat,compat_sys_openat) | 299 | SYSCALL(sys_openat,compat_sys_openat) |
300 | SYSCALL(sys_mkdirat,compat_sys_mkdirat) | 300 | SYSCALL(sys_mkdirat,compat_sys_mkdirat) |
@@ -326,31 +326,31 @@ SYSCALL(sys_fallocate,compat_sys_s390_fallocate) | |||
326 | SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */ | 326 | SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */ |
327 | SYSCALL(sys_signalfd,compat_sys_signalfd) | 327 | SYSCALL(sys_signalfd,compat_sys_signalfd) |
328 | NI_SYSCALL /* 317 old sys_timer_fd */ | 328 | NI_SYSCALL /* 317 old sys_timer_fd */ |
329 | SYSCALL(sys_eventfd,compat_sys_eventfd) | 329 | SYSCALL(sys_eventfd,sys_eventfd) |
330 | SYSCALL(sys_timerfd_create,compat_sys_timerfd_create) | 330 | SYSCALL(sys_timerfd_create,sys_timerfd_create) |
331 | SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ | 331 | SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ |
332 | SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime) | 332 | SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime) |
333 | SYSCALL(sys_signalfd4,compat_sys_signalfd4) | 333 | SYSCALL(sys_signalfd4,compat_sys_signalfd4) |
334 | SYSCALL(sys_eventfd2,compat_sys_eventfd2) | 334 | SYSCALL(sys_eventfd2,sys_eventfd2) |
335 | SYSCALL(sys_inotify_init1,compat_sys_inotify_init1) | 335 | SYSCALL(sys_inotify_init1,sys_inotify_init1) |
336 | SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */ | 336 | SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */ |
337 | SYSCALL(sys_dup3,compat_sys_dup3) | 337 | SYSCALL(sys_dup3,sys_dup3) |
338 | SYSCALL(sys_epoll_create1,compat_sys_epoll_create1) | 338 | SYSCALL(sys_epoll_create1,sys_epoll_create1) |
339 | SYSCALL(sys_preadv,compat_sys_preadv) | 339 | SYSCALL(sys_preadv,compat_sys_preadv) |
340 | SYSCALL(sys_pwritev,compat_sys_pwritev) | 340 | SYSCALL(sys_pwritev,compat_sys_pwritev) |
341 | SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ | 341 | SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ |
342 | SYSCALL(sys_perf_event_open,compat_sys_perf_event_open) | 342 | SYSCALL(sys_perf_event_open,compat_sys_perf_event_open) |
343 | SYSCALL(sys_fanotify_init,compat_sys_fanotify_init) | 343 | SYSCALL(sys_fanotify_init,sys_fanotify_init) |
344 | SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark) | 344 | SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark) |
345 | SYSCALL(sys_prlimit64,compat_sys_prlimit64) | 345 | SYSCALL(sys_prlimit64,compat_sys_prlimit64) |
346 | SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */ | 346 | SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */ |
347 | SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at) | 347 | SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at) |
348 | SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime) | 348 | SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime) |
349 | SYSCALL(sys_syncfs,compat_sys_syncfs) | 349 | SYSCALL(sys_syncfs,sys_syncfs) |
350 | SYSCALL(sys_setns,compat_sys_setns) | 350 | SYSCALL(sys_setns,sys_setns) |
351 | SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */ | 351 | SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */ |
352 | SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev) | 352 | SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev) |
353 | SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr) | 353 | SYSCALL(sys_s390_runtime_instr,sys_s390_runtime_instr) |
354 | SYSCALL(sys_kcmp,compat_sys_kcmp) | 354 | SYSCALL(sys_kcmp,compat_sys_kcmp) |
355 | SYSCALL(sys_finit_module,compat_sys_finit_module) | 355 | SYSCALL(sys_finit_module,compat_sys_finit_module) |
356 | SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ | 356 | SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ |
@@ -363,3 +363,22 @@ SYSCALL(sys_bpf,compat_sys_bpf) | |||
363 | SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) | 363 | SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) |
364 | SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) | 364 | SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) |
365 | SYSCALL(sys_execveat,compat_sys_execveat) | 365 | SYSCALL(sys_execveat,compat_sys_execveat) |
366 | SYSCALL(sys_userfaultfd,sys_userfaultfd) /* 355 */ | ||
367 | SYSCALL(sys_membarrier,sys_membarrier) | ||
368 | SYSCALL(sys_recvmmsg,compat_sys_recvmmsg) | ||
369 | SYSCALL(sys_sendmmsg,compat_sys_sendmmsg) | ||
370 | SYSCALL(sys_socket,sys_socket) | ||
371 | SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */ | ||
372 | SYSCALL(sys_bind,sys_bind) | ||
373 | SYSCALL(sys_connect,sys_connect) | ||
374 | SYSCALL(sys_listen,sys_listen) | ||
375 | SYSCALL(sys_accept4,sys_accept4) | ||
376 | SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */ | ||
377 | SYSCALL(sys_setsockopt,compat_sys_setsockopt) | ||
378 | SYSCALL(sys_getsockname,compat_sys_getsockname) | ||
379 | SYSCALL(sys_getpeername,compat_sys_getpeername) | ||
380 | SYSCALL(sys_sendto,compat_sys_sendto) | ||
381 | SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */ | ||
382 | SYSCALL(sys_recvfrom,compat_sys_recvfrom) | ||
383 | SYSCALL(sys_recvmsg,compat_sys_recvmsg) | ||
384 | SYSCALL(sys_shutdown,sys_shutdown) | ||
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index b9ce650e9e99..dafc44f519c3 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock); | |||
25 | static atomic64_t virt_timer_current; | 25 | static atomic64_t virt_timer_current; |
26 | static atomic64_t virt_timer_elapsed; | 26 | static atomic64_t virt_timer_elapsed; |
27 | 27 | ||
28 | static DEFINE_PER_CPU(u64, mt_cycles[32]); | 28 | DEFINE_PER_CPU(u64, mt_cycles[8]); |
29 | static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; | 29 | static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; |
30 | static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; | 30 | static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; |
31 | static DEFINE_PER_CPU(u64, mt_scaling_jiffies); | 31 | static DEFINE_PER_CPU(u64, mt_scaling_jiffies); |
@@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed) | |||
60 | return elapsed >= atomic64_read(&virt_timer_current); | 60 | return elapsed >= atomic64_read(&virt_timer_current); |
61 | } | 61 | } |
62 | 62 | ||
63 | static void update_mt_scaling(void) | ||
64 | { | ||
65 | u64 cycles_new[8], *cycles_old; | ||
66 | u64 delta, fac, mult, div; | ||
67 | int i; | ||
68 | |||
69 | stcctm5(smp_cpu_mtid + 1, cycles_new); | ||
70 | cycles_old = this_cpu_ptr(mt_cycles); | ||
71 | fac = 1; | ||
72 | mult = div = 0; | ||
73 | for (i = 0; i <= smp_cpu_mtid; i++) { | ||
74 | delta = cycles_new[i] - cycles_old[i]; | ||
75 | div += delta; | ||
76 | mult *= i + 1; | ||
77 | mult += delta * fac; | ||
78 | fac *= i + 1; | ||
79 | } | ||
80 | div *= fac; | ||
81 | if (div > 0) { | ||
82 | /* Update scaling factor */ | ||
83 | __this_cpu_write(mt_scaling_mult, mult); | ||
84 | __this_cpu_write(mt_scaling_div, div); | ||
85 | memcpy(cycles_old, cycles_new, | ||
86 | sizeof(u64) * (smp_cpu_mtid + 1)); | ||
87 | } | ||
88 | __this_cpu_write(mt_scaling_jiffies, jiffies_64); | ||
89 | } | ||
90 | |||
63 | /* | 91 | /* |
64 | * Update process times based on virtual cpu times stored by entry.S | 92 | * Update process times based on virtual cpu times stored by entry.S |
65 | * to the lowcore fields user_timer, system_timer & steal_clock. | 93 | * to the lowcore fields user_timer, system_timer & steal_clock. |
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) | |||
69 | struct thread_info *ti = task_thread_info(tsk); | 97 | struct thread_info *ti = task_thread_info(tsk); |
70 | u64 timer, clock, user, system, steal; | 98 | u64 timer, clock, user, system, steal; |
71 | u64 user_scaled, system_scaled; | 99 | u64 user_scaled, system_scaled; |
72 | int i; | ||
73 | 100 | ||
74 | timer = S390_lowcore.last_update_timer; | 101 | timer = S390_lowcore.last_update_timer; |
75 | clock = S390_lowcore.last_update_clock; | 102 | clock = S390_lowcore.last_update_clock; |
@@ -85,30 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) | |||
85 | S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; | 112 | S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; |
86 | S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; | 113 | S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; |
87 | 114 | ||
88 | /* Do MT utilization calculation */ | 115 | /* Update MT utilization calculation */ |
89 | if (smp_cpu_mtid && | 116 | if (smp_cpu_mtid && |
90 | time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) { | 117 | time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) |
91 | u64 cycles_new[32], *cycles_old; | 118 | update_mt_scaling(); |
92 | u64 delta, mult, div; | ||
93 | |||
94 | cycles_old = this_cpu_ptr(mt_cycles); | ||
95 | if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) { | ||
96 | mult = div = 0; | ||
97 | for (i = 0; i <= smp_cpu_mtid; i++) { | ||
98 | delta = cycles_new[i] - cycles_old[i]; | ||
99 | mult += delta; | ||
100 | div += (i + 1) * delta; | ||
101 | } | ||
102 | if (mult > 0) { | ||
103 | /* Update scaling factor */ | ||
104 | __this_cpu_write(mt_scaling_mult, mult); | ||
105 | __this_cpu_write(mt_scaling_div, div); | ||
106 | memcpy(cycles_old, cycles_new, | ||
107 | sizeof(u64) * (smp_cpu_mtid + 1)); | ||
108 | } | ||
109 | } | ||
110 | __this_cpu_write(mt_scaling_jiffies, jiffies_64); | ||
111 | } | ||
112 | 119 | ||
113 | user = S390_lowcore.user_timer - ti->user_timer; | 120 | user = S390_lowcore.user_timer - ti->user_timer; |
114 | S390_lowcore.steal_timer -= user; | 121 | S390_lowcore.steal_timer -= user; |
@@ -177,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk) | |||
177 | S390_lowcore.last_update_timer = get_vtimer(); | 184 | S390_lowcore.last_update_timer = get_vtimer(); |
178 | S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; | 185 | S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; |
179 | 186 | ||
187 | /* Update MT utilization calculation */ | ||
188 | if (smp_cpu_mtid && | ||
189 | time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) | ||
190 | update_mt_scaling(); | ||
191 | |||
180 | system = S390_lowcore.system_timer - ti->system_timer; | 192 | system = S390_lowcore.system_timer - ti->system_timer; |
181 | S390_lowcore.steal_timer -= system; | 193 | S390_lowcore.steal_timer -= system; |
182 | ti->system_timer = S390_lowcore.system_timer; | 194 | ti->system_timer = S390_lowcore.system_timer; |
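Note on the vtime.c hunks: the MT utilization bookkeeping is factored out into update_mt_scaling() and, after this change, is refreshed both from do_account_vtime() and on IRQ entry. The mult/div pair it maintains is presumably applied elsewhere in vtime.c to scale raw CPU-timer deltas by the measured multithreading utilization; a minimal sketch of that scaling step, assuming mult and div are the per-CPU values written above (the call site shown is illustrative, not a quote of the kernel source):

#include <linux/types.h>

/* Sketch only: apply the MT scaling factor to a raw timer delta. */
static u64 mt_scale(u64 raw, u64 mult, u64 div)
{
	return div ? raw * mult / div : raw;	/* guard against an unset divisor */
}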
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c91eb941b444..0a67c40eece9 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
63 | { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, | 63 | { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, |
64 | { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, | 64 | { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, |
65 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, | 65 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, |
66 | { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, | ||
66 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 67 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
67 | { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, | 68 | { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, |
68 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, | 69 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, |
@@ -1574,7 +1575,7 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) | |||
1574 | 1575 | ||
1575 | static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) | 1576 | static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) |
1576 | { | 1577 | { |
1577 | atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); | 1578 | atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); |
1578 | } | 1579 | } |
1579 | 1580 | ||
1580 | /* | 1581 | /* |
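Note on the kvm-s390.c hunk: this is a plain bit-manipulation fix. kvm_s390_vcpu_request_handled() must clear PROG_REQUEST in prog20, and atomic_or() sets bits while atomic_andnot() clears them. A small sketch of the two operations on an atomic flag word; the flag name is illustrative, not PROG_REQUEST itself:

#include <linux/atomic.h>

#define MY_REQUEST	0x0001	/* illustrative request bit */

static void request_set(atomic_t *flags)
{
	atomic_or(MY_REQUEST, flags);		/* raise the request bit */
}

static void request_handled(atomic_t *flags)
{
	atomic_andnot(MY_REQUEST, flags);	/* clear it once handled */
}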
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c index 7de4e2f780d7..30b2698a28e2 100644 --- a/arch/s390/numa/mode_emu.c +++ b/arch/s390/numa/mode_emu.c | |||
@@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core) | |||
368 | cpumask_copy(&top->thread_mask, &core->mask); | 368 | cpumask_copy(&top->thread_mask, &core->mask); |
369 | cpumask_copy(&top->core_mask, &core_mc(core)->mask); | 369 | cpumask_copy(&top->core_mask, &core_mc(core)->mask); |
370 | cpumask_copy(&top->book_mask, &core_book(core)->mask); | 370 | cpumask_copy(&top->book_mask, &core_book(core)->mask); |
371 | cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]); | 371 | cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]); |
372 | top->node_id = core_node(core)->id; | 372 | top->node_id = core_node(core)->id; |
373 | } | 373 | } |
374 | } | 374 | } |
@@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa) | |||
383 | 383 | ||
384 | /* Clear all node masks */ | 384 | /* Clear all node masks */ |
385 | for (i = 0; i < MAX_NUMNODES; i++) | 385 | for (i = 0; i < MAX_NUMNODES; i++) |
386 | cpumask_clear(node_to_cpumask_map[i]); | 386 | cpumask_clear(&node_to_cpumask_map[i]); |
387 | 387 | ||
388 | /* Rebuild all masks */ | 388 | /* Rebuild all masks */ |
389 | toptree_for_each(core, numa, CORE) | 389 | toptree_for_each(core, numa, CORE) |
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index 09b1d2355bd9..43f32ce60aa3 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c | |||
@@ -23,7 +23,7 @@ | |||
23 | pg_data_t *node_data[MAX_NUMNODES]; | 23 | pg_data_t *node_data[MAX_NUMNODES]; |
24 | EXPORT_SYMBOL(node_data); | 24 | EXPORT_SYMBOL(node_data); |
25 | 25 | ||
26 | cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; | 26 | cpumask_t node_to_cpumask_map[MAX_NUMNODES]; |
27 | EXPORT_SYMBOL(node_to_cpumask_map); | 27 | EXPORT_SYMBOL(node_to_cpumask_map); |
28 | 28 | ||
29 | const struct numa_mode numa_mode_plain = { | 29 | const struct numa_mode numa_mode_plain = { |
@@ -144,7 +144,7 @@ void __init numa_setup(void) | |||
144 | static int __init numa_init_early(void) | 144 | static int __init numa_init_early(void) |
145 | { | 145 | { |
146 | /* Attach all possible CPUs to node 0 for now. */ | 146 | /* Attach all possible CPUs to node 0 for now. */ |
147 | cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask); | 147 | cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); |
148 | return 0; | 148 | return 0; |
149 | } | 149 | } |
150 | early_initcall(numa_init_early); | 150 | early_initcall(numa_init_early); |
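Note on the numa hunks: node_to_cpumask_map moves from cpumask_var_t to a plain cpumask_t array. With CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is a pointer that must be set up with alloc_cpumask_var()/zalloc_cpumask_var() before any cpumask_copy(), which is easy to miss in early init code; an embedded cpumask_t is always valid, at the cost of MAX_NUMNODES full masks in static storage. A sketch of the difference under those assumptions (node usage and names are illustrative):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>

/* Variant 1: needs allocation before use when CPUMASK_OFFSTACK=y. */
static cpumask_var_t dyn_mask;

static int __init dyn_mask_init(void)
{
	if (!zalloc_cpumask_var(&dyn_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(dyn_mask, cpu_possible_mask);
	return 0;
}

/* Variant 2: embedded mask, usable immediately (what the patch switches to). */
static cpumask_t static_mask;

static int __init static_mask_init(void)
{
	cpumask_copy(&static_mask, cpu_possible_mask);
	return 0;
}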
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 92ffe397b893..a05218ff3fe4 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild | |||
@@ -13,3 +13,4 @@ generic-y += sections.h | |||
13 | generic-y += trace_clock.h | 13 | generic-y += trace_clock.h |
14 | generic-y += xor.h | 14 | generic-y += xor.h |
15 | generic-y += serial.h | 15 | generic-y += serial.h |
16 | generic-y += word-at-a-time.h | ||
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c index 6f97a8f0d0d6..6129aef6db76 100644 --- a/arch/sh/boards/mach-se/7343/irq.c +++ b/arch/sh/boards/mach-se/7343/irq.c | |||
@@ -29,7 +29,7 @@ | |||
29 | static void __iomem *se7343_irq_regs; | 29 | static void __iomem *se7343_irq_regs; |
30 | struct irq_domain *se7343_irq_domain; | 30 | struct irq_domain *se7343_irq_domain; |
31 | 31 | ||
32 | static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc) | 32 | static void se7343_irq_demux(struct irq_desc *desc) |
33 | { | 33 | { |
34 | struct irq_data *data = irq_desc_get_irq_data(desc); | 34 | struct irq_data *data = irq_desc_get_irq_data(desc); |
35 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 35 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
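Note on the se7343 hunk above (and the matching se7722, se7724, x3proto, hd64461, leon, grpci, pci_gx and unicore32 hunks below): these follow a tree-wide change of irq_flow_handler_t from (unsigned int irq, struct irq_desc *desc) to (struct irq_desc *desc); handlers that still need the Linux irq number recover it with irq_desc_get_irq(). A sketch of a chained demux handler in the new style; the board helpers and irq base are illustrative stand-ins, not real APIs:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

static unsigned int board_irq_base;	/* illustrative hw-bit to Linux-irq offset */

static unsigned long board_read_pending(unsigned int parent_irq)
{
	return 0;	/* stand-in for a board-specific status register read */
}

static void board_irq_demux(struct irq_desc *desc)	/* no 'unsigned int irq' argument anymore */
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int irq = irq_desc_get_irq(desc);	/* recover the number if still needed */
	unsigned long pending, bit;

	chained_irq_enter(chip, desc);
	pending = board_read_pending(irq);
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(board_irq_base + bit);
	chained_irq_exit(chip, desc);
}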
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c index 60aebd14ccf8..24c74a88290c 100644 --- a/arch/sh/boards/mach-se/7722/irq.c +++ b/arch/sh/boards/mach-se/7722/irq.c | |||
@@ -28,7 +28,7 @@ | |||
28 | static void __iomem *se7722_irq_regs; | 28 | static void __iomem *se7722_irq_regs; |
29 | struct irq_domain *se7722_irq_domain; | 29 | struct irq_domain *se7722_irq_domain; |
30 | 30 | ||
31 | static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc) | 31 | static void se7722_irq_demux(struct irq_desc *desc) |
32 | { | 32 | { |
33 | struct irq_data *data = irq_desc_get_irq_data(desc); | 33 | struct irq_data *data = irq_desc_get_irq_data(desc); |
34 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 34 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c index 9f2033898652..64e681e66c57 100644 --- a/arch/sh/boards/mach-se/7724/irq.c +++ b/arch/sh/boards/mach-se/7724/irq.c | |||
@@ -92,7 +92,7 @@ static struct irq_chip se7724_irq_chip __read_mostly = { | |||
92 | .irq_unmask = enable_se7724_irq, | 92 | .irq_unmask = enable_se7724_irq, |
93 | }; | 93 | }; |
94 | 94 | ||
95 | static void se7724_irq_demux(unsigned int __irq, struct irq_desc *desc) | 95 | static void se7724_irq_demux(struct irq_desc *desc) |
96 | { | 96 | { |
97 | unsigned int irq = irq_desc_get_irq(desc); | 97 | unsigned int irq = irq_desc_get_irq(desc); |
98 | struct fpga_irq set = get_fpga_irq(irq); | 98 | struct fpga_irq set = get_fpga_irq(irq); |
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c index 24555c364d5b..1fb2cbee25f2 100644 --- a/arch/sh/boards/mach-x3proto/gpio.c +++ b/arch/sh/boards/mach-x3proto/gpio.c | |||
@@ -60,7 +60,7 @@ static int x3proto_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) | |||
60 | return virq; | 60 | return virq; |
61 | } | 61 | } |
62 | 62 | ||
63 | static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 63 | static void x3proto_gpio_irq_handler(struct irq_desc *desc) |
64 | { | 64 | { |
65 | struct irq_data *data = irq_desc_get_irq_data(desc); | 65 | struct irq_data *data = irq_desc_get_irq_data(desc); |
66 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 66 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c index e9735616bdc8..8180092502f7 100644 --- a/arch/sh/cchips/hd6446x/hd64461.c +++ b/arch/sh/cchips/hd6446x/hd64461.c | |||
@@ -56,7 +56,7 @@ static struct irq_chip hd64461_irq_chip = { | |||
56 | .irq_unmask = hd64461_unmask_irq, | 56 | .irq_unmask = hd64461_unmask_irq, |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc) | 59 | static void hd64461_irq_demux(struct irq_desc *desc) |
60 | { | 60 | { |
61 | unsigned short intv = __raw_readw(HD64461_NIRR); | 61 | unsigned short intv = __raw_readw(HD64461_NIRR); |
62 | unsigned int ext_irq = HD64461_IRQBASE; | 62 | unsigned int ext_irq = HD64461_IRQBASE; |
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index fe20d14ae051..ceb5201a30ed 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h | |||
@@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2) | |||
59 | 59 | ||
60 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | 60 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) |
61 | extern void copy_page(void *to, void *from); | 61 | extern void copy_page(void *to, void *from); |
62 | #define copy_user_page(to, from, vaddr, pg) __copy_user(to, from, PAGE_SIZE) | ||
62 | 63 | ||
63 | struct page; | 64 | struct page; |
64 | struct vm_area_struct; | 65 | struct vm_area_struct; |
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index 2e48eb8813ff..c90930de76ba 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c | |||
@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { { | |||
433 | .blkcipher = { | 433 | .blkcipher = { |
434 | .min_keysize = AES_MIN_KEY_SIZE, | 434 | .min_keysize = AES_MIN_KEY_SIZE, |
435 | .max_keysize = AES_MAX_KEY_SIZE, | 435 | .max_keysize = AES_MAX_KEY_SIZE, |
436 | .ivsize = AES_BLOCK_SIZE, | ||
436 | .setkey = aes_set_key, | 437 | .setkey = aes_set_key, |
437 | .encrypt = cbc_encrypt, | 438 | .encrypt = cbc_encrypt, |
438 | .decrypt = cbc_decrypt, | 439 | .decrypt = cbc_decrypt, |
@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { { | |||
452 | .blkcipher = { | 453 | .blkcipher = { |
453 | .min_keysize = AES_MIN_KEY_SIZE, | 454 | .min_keysize = AES_MIN_KEY_SIZE, |
454 | .max_keysize = AES_MAX_KEY_SIZE, | 455 | .max_keysize = AES_MAX_KEY_SIZE, |
456 | .ivsize = AES_BLOCK_SIZE, | ||
455 | .setkey = aes_set_key, | 457 | .setkey = aes_set_key, |
456 | .encrypt = ctr_crypt, | 458 | .encrypt = ctr_crypt, |
457 | .decrypt = ctr_crypt, | 459 | .decrypt = ctr_crypt, |
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c index 6bf2479a12fb..561a84d93cf6 100644 --- a/arch/sparc/crypto/camellia_glue.c +++ b/arch/sparc/crypto/camellia_glue.c | |||
@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { { | |||
274 | .blkcipher = { | 274 | .blkcipher = { |
275 | .min_keysize = CAMELLIA_MIN_KEY_SIZE, | 275 | .min_keysize = CAMELLIA_MIN_KEY_SIZE, |
276 | .max_keysize = CAMELLIA_MAX_KEY_SIZE, | 276 | .max_keysize = CAMELLIA_MAX_KEY_SIZE, |
277 | .ivsize = CAMELLIA_BLOCK_SIZE, | ||
277 | .setkey = camellia_set_key, | 278 | .setkey = camellia_set_key, |
278 | .encrypt = cbc_encrypt, | 279 | .encrypt = cbc_encrypt, |
279 | .decrypt = cbc_decrypt, | 280 | .decrypt = cbc_decrypt, |
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c index dd6a34fa6e19..61af794aa2d3 100644 --- a/arch/sparc/crypto/des_glue.c +++ b/arch/sparc/crypto/des_glue.c | |||
@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { { | |||
429 | .blkcipher = { | 429 | .blkcipher = { |
430 | .min_keysize = DES_KEY_SIZE, | 430 | .min_keysize = DES_KEY_SIZE, |
431 | .max_keysize = DES_KEY_SIZE, | 431 | .max_keysize = DES_KEY_SIZE, |
432 | .ivsize = DES_BLOCK_SIZE, | ||
432 | .setkey = des_set_key, | 433 | .setkey = des_set_key, |
433 | .encrypt = cbc_encrypt, | 434 | .encrypt = cbc_encrypt, |
434 | .decrypt = cbc_decrypt, | 435 | .decrypt = cbc_decrypt, |
@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { { | |||
485 | .blkcipher = { | 486 | .blkcipher = { |
486 | .min_keysize = DES3_EDE_KEY_SIZE, | 487 | .min_keysize = DES3_EDE_KEY_SIZE, |
487 | .max_keysize = DES3_EDE_KEY_SIZE, | 488 | .max_keysize = DES3_EDE_KEY_SIZE, |
489 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
488 | .setkey = des3_ede_set_key, | 490 | .setkey = des3_ede_set_key, |
489 | .encrypt = cbc3_encrypt, | 491 | .encrypt = cbc3_encrypt, |
490 | .decrypt = cbc3_decrypt, | 492 | .decrypt = cbc3_decrypt, |
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 0299f052a2ef..42efcf85f721 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c | |||
@@ -53,7 +53,7 @@ static inline unsigned int leon_eirq_get(int cpu) | |||
53 | } | 53 | } |
54 | 54 | ||
55 | /* Handle one or multiple IRQs from the extended interrupt controller */ | 55 | /* Handle one or multiple IRQs from the extended interrupt controller */ |
56 | static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc) | 56 | static void leon_handle_ext_irq(struct irq_desc *desc) |
57 | { | 57 | { |
58 | unsigned int eirq; | 58 | unsigned int eirq; |
59 | struct irq_bucket *p; | 59 | struct irq_bucket *p; |
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index 3382f7b3eeef..1e77128a8f88 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c | |||
@@ -357,7 +357,7 @@ static struct irq_chip grpci1_irq = { | |||
357 | }; | 357 | }; |
358 | 358 | ||
359 | /* Handle one or multiple IRQs from the PCI core */ | 359 | /* Handle one or multiple IRQs from the PCI core */ |
360 | static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc) | 360 | static void grpci1_pci_flow_irq(struct irq_desc *desc) |
361 | { | 361 | { |
362 | struct grpci1_priv *priv = grpci1priv; | 362 | struct grpci1_priv *priv = grpci1priv; |
363 | int i, ack = 0; | 363 | int i, ack = 0; |
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index 814fb1729b12..f727c4de1316 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c | |||
@@ -498,7 +498,7 @@ static struct irq_chip grpci2_irq = { | |||
498 | }; | 498 | }; |
499 | 499 | ||
500 | /* Handle one or multiple IRQs from the PCI core */ | 500 | /* Handle one or multiple IRQs from the PCI core */ |
501 | static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc) | 501 | static void grpci2_pci_flow_irq(struct irq_desc *desc) |
502 | { | 502 | { |
503 | struct grpci2_priv *priv = grpci2priv; | 503 | struct grpci2_priv *priv = grpci2priv; |
504 | int i, ack = 0; | 504 | int i, ack = 0; |
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c index ee186e13dfe6..f102048d9c0e 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/string.h> | ||
22 | 23 | ||
23 | #include <gxio/iorpc_globals.h> | 24 | #include <gxio/iorpc_globals.h> |
24 | #include <gxio/iorpc_mpipe.h> | 25 | #include <gxio/iorpc_mpipe.h> |
@@ -29,32 +30,6 @@ | |||
29 | /* HACK: Avoid pointless "shadow" warnings. */ | 30 | /* HACK: Avoid pointless "shadow" warnings. */ |
30 | #define link link_shadow | 31 | #define link link_shadow |
31 | 32 | ||
32 | /** | ||
33 | * strscpy - Copy a C-string into a sized buffer, but only if it fits | ||
34 | * @dest: Where to copy the string to | ||
35 | * @src: Where to copy the string from | ||
36 | * @size: size of destination buffer | ||
37 | * | ||
38 | * Use this routine to avoid copying too-long strings. | ||
39 | * The routine returns the total number of bytes copied | ||
40 | * (including the trailing NUL) or zero if the buffer wasn't | ||
41 | * big enough. To ensure that programmers pay attention | ||
42 | * to the return code, the destination has a single NUL | ||
43 | * written at the front (if size is non-zero) when the | ||
44 | * buffer is not big enough. | ||
45 | */ | ||
46 | static size_t strscpy(char *dest, const char *src, size_t size) | ||
47 | { | ||
48 | size_t len = strnlen(src, size) + 1; | ||
49 | if (len > size) { | ||
50 | if (size) | ||
51 | dest[0] = '\0'; | ||
52 | return 0; | ||
53 | } | ||
54 | memcpy(dest, src, len); | ||
55 | return len; | ||
56 | } | ||
57 | |||
58 | int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) | 33 | int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) |
59 | { | 34 | { |
60 | char file[32]; | 35 | char file[32]; |
@@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name) | |||
540 | if (!context) | 515 | if (!context) |
541 | return GXIO_ERR_NO_DEVICE; | 516 | return GXIO_ERR_NO_DEVICE; |
542 | 517 | ||
543 | if (strscpy(name.name, link_name, sizeof(name.name)) == 0) | 518 | if (strscpy(name.name, link_name, sizeof(name.name)) < 0) |
544 | return GXIO_ERR_NO_DEVICE; | 519 | return GXIO_ERR_NO_DEVICE; |
545 | 520 | ||
546 | return gxio_mpipe_info_instance_aux(context, name); | 521 | return gxio_mpipe_info_instance_aux(context, name); |
@@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) | |||
559 | 534 | ||
560 | rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); | 535 | rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); |
561 | if (rv >= 0) { | 536 | if (rv >= 0) { |
562 | if (strscpy(link_name, name.name, sizeof(name.name)) == 0) | 537 | if (strscpy(link_name, name.name, sizeof(name.name)) < 0) |
563 | return GXIO_ERR_INVAL_MEMORY_SIZE; | 538 | return GXIO_ERR_INVAL_MEMORY_SIZE; |
564 | memcpy(link_mac, mac.mac, sizeof(mac.mac)); | 539 | memcpy(link_mac, mac.mac, sizeof(mac.mac)); |
565 | } | 540 | } |
@@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link, | |||
576 | _gxio_mpipe_link_name_t name; | 551 | _gxio_mpipe_link_name_t name; |
577 | int rv; | 552 | int rv; |
578 | 553 | ||
579 | if (strscpy(name.name, link_name, sizeof(name.name)) == 0) | 554 | if (strscpy(name.name, link_name, sizeof(name.name)) < 0) |
580 | return GXIO_ERR_NO_DEVICE; | 555 | return GXIO_ERR_NO_DEVICE; |
581 | 556 | ||
582 | rv = gxio_mpipe_link_open_aux(context, name, flags); | 557 | rv = gxio_mpipe_link_open_aux(context, name, flags); |
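Note on the gxio/mpipe.c hunks: the private strscpy() copy is dropped in favour of the generic strscpy() from <linux/string.h>, whose return convention differs. The generic helper returns the number of characters copied (excluding the trailing NUL) or -E2BIG when the source does not fit, rather than 0 on overflow, which is why the callers now test < 0 instead of == 0. A sketch of the updated pattern; the wrapper name and error code are illustrative (the gxio code itself returns GXIO_ERR_NO_DEVICE or GXIO_ERR_INVAL_MEMORY_SIZE):

#include <linux/errno.h>
#include <linux/string.h>

static int copy_link_name(char *dst, size_t dst_size, const char *src)
{
	/* strscpy(): >= 0 characters copied on success, -E2BIG if truncated. */
	if (strscpy(dst, src, dst_size) < 0)
		return -ENODEV;
	return 0;
}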
diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h index 9e5ce0d7b292..b66a693c2c34 100644 --- a/arch/tile/include/asm/word-at-a-time.h +++ b/arch/tile/include/asm/word-at-a-time.h | |||
@@ -6,7 +6,7 @@ | |||
6 | struct word_at_a_time { /* unused */ }; | 6 | struct word_at_a_time { /* unused */ }; |
7 | #define WORD_AT_A_TIME_CONSTANTS {} | 7 | #define WORD_AT_A_TIME_CONSTANTS {} |
8 | 8 | ||
9 | /* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */ | 9 | /* Generate 0x01 byte values for zero bytes using a SIMD instruction. */ |
10 | static inline unsigned long has_zero(unsigned long val, unsigned long *data, | 10 | static inline unsigned long has_zero(unsigned long val, unsigned long *data, |
11 | const struct word_at_a_time *c) | 11 | const struct word_at_a_time *c) |
12 | { | 12 | { |
@@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask) | |||
33 | #endif | 33 | #endif |
34 | } | 34 | } |
35 | 35 | ||
36 | #ifdef __BIG_ENDIAN | ||
37 | #define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask))) | ||
38 | #else | ||
39 | #define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1) | ||
40 | #endif | ||
41 | |||
36 | #endif /* _ASM_WORD_AT_A_TIME_H */ | 42 | #endif /* _ASM_WORD_AT_A_TIME_H */ |
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index b3f73fd764a3..4c017d0d2de8 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c | |||
@@ -304,17 +304,16 @@ static struct irq_chip tilegx_legacy_irq_chip = { | |||
304 | * to Linux which just calls handle_level_irq() after clearing the | 304 | * to Linux which just calls handle_level_irq() after clearing the |
305 | * MAC INTx Assert status bit associated with this interrupt. | 305 | * MAC INTx Assert status bit associated with this interrupt. |
306 | */ | 306 | */ |
307 | static void trio_handle_level_irq(unsigned int __irq, struct irq_desc *desc) | 307 | static void trio_handle_level_irq(struct irq_desc *desc) |
308 | { | 308 | { |
309 | struct pci_controller *controller = irq_desc_get_handler_data(desc); | 309 | struct pci_controller *controller = irq_desc_get_handler_data(desc); |
310 | gxio_trio_context_t *trio_context = controller->trio; | 310 | gxio_trio_context_t *trio_context = controller->trio; |
311 | uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); | 311 | uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); |
312 | unsigned int irq = irq_desc_get_irq(desc); | ||
313 | int mac = controller->mac; | 312 | int mac = controller->mac; |
314 | unsigned int reg_offset; | 313 | unsigned int reg_offset; |
315 | uint64_t level_mask; | 314 | uint64_t level_mask; |
316 | 315 | ||
317 | handle_level_irq(irq, desc); | 316 | handle_level_irq(desc); |
318 | 317 | ||
319 | /* | 318 | /* |
320 | * Clear the INTx Level status, otherwise future interrupts are | 319 | * Clear the INTx Level status, otherwise future interrupts are |
diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c index f0da5a237e94..9f1e05e12255 100644 --- a/arch/tile/kernel/usb.c +++ b/arch/tile/kernel/usb.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/usb/tilegx.h> | 23 | #include <linux/usb/tilegx.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/module.h> | ||
25 | #include <linux/types.h> | 26 | #include <linux/types.h> |
26 | 27 | ||
27 | static u64 ehci_dmamask = DMA_BIT_MASK(32); | 28 | static u64 ehci_dmamask = DMA_BIT_MASK(32); |
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 149ec55f9c46..904f3ebf4220 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild | |||
@@ -25,4 +25,5 @@ generic-y += preempt.h | |||
25 | generic-y += switch_to.h | 25 | generic-y += switch_to.h |
26 | generic-y += topology.h | 26 | generic-y += topology.h |
27 | generic-y += trace_clock.h | 27 | generic-y += trace_clock.h |
28 | generic-y += word-at-a-time.h | ||
28 | generic-y += xor.h | 29 | generic-y += xor.h |
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1fc7a286dc6f..256c45b3ae34 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild | |||
@@ -62,4 +62,5 @@ generic-y += ucontext.h | |||
62 | generic-y += unaligned.h | 62 | generic-y += unaligned.h |
63 | generic-y += user.h | 63 | generic-y += user.h |
64 | generic-y += vga.h | 64 | generic-y += vga.h |
65 | generic-y += word-at-a-time.h | ||
65 | generic-y += xor.h | 66 | generic-y += xor.h |
diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c index c53729d92e8d..eb1fd0030359 100644 --- a/arch/unicore32/kernel/irq.c +++ b/arch/unicore32/kernel/irq.c | |||
@@ -112,7 +112,7 @@ static struct irq_chip puv3_low_gpio_chip = { | |||
112 | * irq_controller_lock held, and IRQs disabled. Decode the IRQ | 112 | * irq_controller_lock held, and IRQs disabled. Decode the IRQ |
113 | * and call the handler. | 113 | * and call the handler. |
114 | */ | 114 | */ |
115 | static void puv3_gpio_handler(unsigned int __irq, struct irq_desc *desc) | 115 | static void puv3_gpio_handler(struct irq_desc *desc) |
116 | { | 116 | { |
117 | unsigned int mask, irq; | 117 | unsigned int mask, irq; |
118 | 118 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7aef2d52daa0..96d058a87100 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -1006,7 +1006,7 @@ config X86_THERMAL_VECTOR | |||
1006 | depends on X86_MCE_INTEL | 1006 | depends on X86_MCE_INTEL |
1007 | 1007 | ||
1008 | config X86_LEGACY_VM86 | 1008 | config X86_LEGACY_VM86 |
1009 | bool "Legacy VM86 support (obsolete)" | 1009 | bool "Legacy VM86 support" |
1010 | default n | 1010 | default n |
1011 | depends on X86_32 | 1011 | depends on X86_32 |
1012 | ---help--- | 1012 | ---help--- |
@@ -1018,19 +1018,20 @@ config X86_LEGACY_VM86 | |||
1018 | available to accelerate real mode DOS programs. However, any | 1018 | available to accelerate real mode DOS programs. However, any |
1019 | recent version of DOSEMU, X, or vbetool should be fully | 1019 | recent version of DOSEMU, X, or vbetool should be fully |
1020 | functional even without kernel VM86 support, as they will all | 1020 | functional even without kernel VM86 support, as they will all |
1021 | fall back to (pretty well performing) software emulation. | 1021 | fall back to software emulation. Nevertheless, if you are using |
1022 | a 16-bit DOS program where 16-bit performance matters, vm86 | ||
1023 | mode might be faster than emulation and you might want to | ||
1024 | enable this option. | ||
1022 | 1025 | ||
1023 | Anything that works on a 64-bit kernel is unlikely to need | 1026 | Note that any app that works on a 64-bit kernel is unlikely to |
1024 | this option, as 64-bit kernels don't, and can't, support V8086 | 1027 | need this option, as 64-bit kernels don't, and can't, support |
1025 | mode. This option is also unrelated to 16-bit protected mode | 1028 | V8086 mode. This option is also unrelated to 16-bit protected |
1026 | and is not needed to run most 16-bit programs under Wine. | 1029 | mode and is not needed to run most 16-bit programs under Wine. |
1027 | 1030 | ||
1028 | Enabling this option adds considerable attack surface to the | 1031 | Enabling this option increases the complexity of the kernel |
1029 | kernel and slows down system calls and exception handling. | 1032 | and slows down exception handling a tiny bit. |
1030 | 1033 | ||
1031 | Unless you use very old userspace or need the last drop of | 1034 | If unsure, say N here. |
1032 | performance in your real mode DOS games and can't use KVM, | ||
1033 | say N here. | ||
1034 | 1035 | ||
1035 | config VM86 | 1036 | config VM86 |
1036 | bool | 1037 | bool |
@@ -1307,6 +1308,7 @@ config HIGHMEM | |||
1307 | config X86_PAE | 1308 | config X86_PAE |
1308 | bool "PAE (Physical Address Extension) Support" | 1309 | bool "PAE (Physical Address Extension) Support" |
1309 | depends on X86_32 && !HIGHMEM4G | 1310 | depends on X86_32 && !HIGHMEM4G |
1311 | select SWIOTLB | ||
1310 | ---help--- | 1312 | ---help--- |
1311 | PAE is required for NX support, and furthermore enables | 1313 | PAE is required for NX support, and furthermore enables |
1312 | larger swapspace support for non-overcommit purposes. It | 1314 | larger swapspace support for non-overcommit purposes. It |
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 80a0e4389c9a..bacaa13acac5 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c | |||
@@ -554,6 +554,11 @@ static int __init camellia_aesni_init(void) | |||
554 | { | 554 | { |
555 | const char *feature_name; | 555 | const char *feature_name; |
556 | 556 | ||
557 | if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) { | ||
558 | pr_info("AVX or AES-NI instructions are not detected.\n"); | ||
559 | return -ENODEV; | ||
560 | } | ||
561 | |||
557 | if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { | 562 | if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { |
558 | pr_info("CPU feature '%s' is not supported.\n", feature_name); | 563 | pr_info("CPU feature '%s' is not supported.\n", feature_name); |
559 | return -ENODEV; | 564 | return -ENODEV; |
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index d3033183ed70..055a01de7c8d 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
@@ -1128,7 +1128,18 @@ END(error_exit) | |||
1128 | 1128 | ||
1129 | /* Runs on exception stack */ | 1129 | /* Runs on exception stack */ |
1130 | ENTRY(nmi) | 1130 | ENTRY(nmi) |
1131 | /* | ||
1132 | * Fix up the exception frame if we're on Xen. | ||
1133 | * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most | ||
1134 | * one value to the stack on native, so it may clobber the rdx | ||
1135 | * scratch slot, but it won't clobber any of the important | ||
1136 | * slots past it. | ||
1137 | * | ||
1138 | * Xen is a different story, because the Xen frame itself overlaps | ||
1139 | * the "NMI executing" variable. | ||
1140 | */ | ||
1131 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1141 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1142 | |||
1132 | /* | 1143 | /* |
1133 | * We allow breakpoints in NMIs. If a breakpoint occurs, then | 1144 | * We allow breakpoints in NMIs. If a breakpoint occurs, then |
1134 | * the iretq it performs will take us out of NMI context. | 1145 | * the iretq it performs will take us out of NMI context. |
@@ -1179,9 +1190,12 @@ ENTRY(nmi) | |||
1179 | * we don't want to enable interrupts, because then we'll end | 1190 | * we don't want to enable interrupts, because then we'll end |
1180 | * up in an awkward situation in which IRQs are on but NMIs | 1191 | * up in an awkward situation in which IRQs are on but NMIs |
1181 | * are off. | 1192 | * are off. |
1193 | * | ||
1194 | * We also must not push anything to the stack before switching | ||
1195 | * stacks lest we corrupt the "NMI executing" variable. | ||
1182 | */ | 1196 | */ |
1183 | 1197 | ||
1184 | SWAPGS | 1198 | SWAPGS_UNSAFE_STACK |
1185 | cld | 1199 | cld |
1186 | movq %rsp, %rdx | 1200 | movq %rsp, %rdx |
1187 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | 1201 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 477fc28050e4..9727b3b48bd1 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -193,7 +193,7 @@ | |||
193 | #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ | 193 | #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ |
194 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ | 194 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ |
195 | #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ | 195 | #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ |
196 | #define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ | 196 | #define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ |
197 | #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ | 197 | #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ |
198 | #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ | 198 | #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ |
199 | #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ | 199 | #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ |
@@ -241,6 +241,7 @@ | |||
241 | #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ | 241 | #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ |
242 | #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ | 242 | #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ |
243 | #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ | 243 | #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ |
244 | #define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ | ||
244 | 245 | ||
245 | /* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ | 246 | /* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ |
246 | #define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ | 247 | #define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 155162ea0e00..ae68be92f755 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -86,6 +86,18 @@ extern u64 asmlinkage efi_call(void *fp, ...); | |||
86 | extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, | 86 | extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, |
87 | u32 type, u64 attribute); | 87 | u32 type, u64 attribute); |
88 | 88 | ||
89 | #ifdef CONFIG_KASAN | ||
90 | /* | ||
91 | * CONFIG_KASAN may redefine memset to __memset. __memset function is present | ||
92 | * only in kernel binary. Since the EFI stub linked into a separate binary it | ||
93 | * doesn't have __memset(). So we should use standard memset from | ||
94 | * arch/x86/boot/compressed/string.c. The same applies to memcpy and memmove. | ||
95 | */ | ||
96 | #undef memcpy | ||
97 | #undef memset | ||
98 | #undef memmove | ||
99 | #endif | ||
100 | |||
89 | #endif /* CONFIG_X86_32 */ | 101 | #endif /* CONFIG_X86_32 */ |
90 | 102 | ||
91 | extern struct efi_scratch efi_scratch; | 103 | extern struct efi_scratch efi_scratch; |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c12e845f59e6..3a36ee704c30 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -40,6 +40,7 @@ | |||
40 | 40 | ||
41 | #define KVM_PIO_PAGE_OFFSET 1 | 41 | #define KVM_PIO_PAGE_OFFSET 1 |
42 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 2 | 42 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 2 |
43 | #define KVM_HALT_POLL_NS_DEFAULT 500000 | ||
43 | 44 | ||
44 | #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS | 45 | #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS |
45 | 46 | ||
@@ -711,6 +712,7 @@ struct kvm_vcpu_stat { | |||
711 | u32 nmi_window_exits; | 712 | u32 nmi_window_exits; |
712 | u32 halt_exits; | 713 | u32 halt_exits; |
713 | u32 halt_successful_poll; | 714 | u32 halt_successful_poll; |
715 | u32 halt_attempted_poll; | ||
714 | u32 halt_wakeup; | 716 | u32 halt_wakeup; |
715 | u32 request_irq_exits; | 717 | u32 request_irq_exits; |
716 | u32 irq_exits; | 718 | u32 irq_exits; |
@@ -1224,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); | |||
1224 | 1226 | ||
1225 | int kvm_is_in_guest(void); | 1227 | int kvm_is_in_guest(void); |
1226 | 1228 | ||
1227 | int __x86_set_memory_region(struct kvm *kvm, | 1229 | int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); |
1228 | const struct kvm_userspace_memory_region *mem); | 1230 | int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); |
1229 | int x86_set_memory_region(struct kvm *kvm, | ||
1230 | const struct kvm_userspace_memory_region *mem); | ||
1231 | bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu); | 1231 | bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu); |
1232 | bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); | 1232 | bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); |
1233 | 1233 | ||
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index c1c0a1c14344..b8c14bb7fc8f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -141,6 +141,8 @@ | |||
141 | #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) | 141 | #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) |
142 | #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) | 142 | #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) |
143 | 143 | ||
144 | #define MSR_PEBS_FRONTEND 0x000003f7 | ||
145 | |||
144 | #define MSR_IA32_POWER_CTL 0x000001fc | 146 | #define MSR_IA32_POWER_CTL 0x000001fc |
145 | 147 | ||
146 | #define MSR_IA32_MC0_CTL 0x00000400 | 148 | #define MSR_IA32_MC0_CTL 0x00000400 |
@@ -331,6 +333,7 @@ | |||
331 | /* C1E active bits in int pending message */ | 333 | /* C1E active bits in int pending message */ |
332 | #define K8_INTP_C1E_ACTIVE_MASK 0x18000000 | 334 | #define K8_INTP_C1E_ACTIVE_MASK 0x18000000 |
333 | #define MSR_K8_TSEG_ADDR 0xc0010112 | 335 | #define MSR_K8_TSEG_ADDR 0xc0010112 |
336 | #define MSR_K8_TSEG_MASK 0xc0010113 | ||
334 | #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ | 337 | #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ |
335 | #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ | 338 | #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ |
336 | #define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ | 339 | #define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index ce029e4fa7c6..31247b5bff7c 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -97,7 +97,6 @@ struct pv_lazy_ops { | |||
97 | struct pv_time_ops { | 97 | struct pv_time_ops { |
98 | unsigned long long (*sched_clock)(void); | 98 | unsigned long long (*sched_clock)(void); |
99 | unsigned long long (*steal_clock)(int cpu); | 99 | unsigned long long (*steal_clock)(int cpu); |
100 | unsigned long (*get_tsc_khz)(void); | ||
101 | }; | 100 | }; |
102 | 101 | ||
103 | struct pv_cpu_ops { | 102 | struct pv_cpu_ops { |
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h index 655e07a48f6c..67f08230103a 100644 --- a/arch/x86/include/asm/pvclock-abi.h +++ b/arch/x86/include/asm/pvclock-abi.h | |||
@@ -41,6 +41,7 @@ struct pvclock_wall_clock { | |||
41 | 41 | ||
42 | #define PVCLOCK_TSC_STABLE_BIT (1 << 0) | 42 | #define PVCLOCK_TSC_STABLE_BIT (1 << 0) |
43 | #define PVCLOCK_GUEST_STOPPED (1 << 1) | 43 | #define PVCLOCK_GUEST_STOPPED (1 << 1) |
44 | /* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */ | ||
44 | #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) | 45 | #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) |
45 | #endif /* __ASSEMBLY__ */ | 46 | #endif /* __ASSEMBLY__ */ |
46 | #endif /* _ASM_X86_PVCLOCK_ABI_H */ | 47 | #endif /* _ASM_X86_PVCLOCK_ABI_H */ |
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 9d51fae1cba3..eaba08076030 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h | |||
@@ -39,18 +39,27 @@ static inline void queued_spin_unlock(struct qspinlock *lock) | |||
39 | } | 39 | } |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | #define virt_queued_spin_lock virt_queued_spin_lock | 42 | #ifdef CONFIG_PARAVIRT |
43 | 43 | #define virt_spin_lock virt_spin_lock | |
44 | static inline bool virt_queued_spin_lock(struct qspinlock *lock) | 44 | static inline bool virt_spin_lock(struct qspinlock *lock) |
45 | { | 45 | { |
46 | if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) | 46 | if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) |
47 | return false; | 47 | return false; |
48 | 48 | ||
49 | while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0) | 49 | /* |
50 | cpu_relax(); | 50 | * On hypervisors without PARAVIRT_SPINLOCKS support we fall |
51 | * back to a Test-and-Set spinlock, because fair locks have | ||
52 | * horrible lock 'holder' preemption issues. | ||
53 | */ | ||
54 | |||
55 | do { | ||
56 | while (atomic_read(&lock->val) != 0) | ||
57 | cpu_relax(); | ||
58 | } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0); | ||
51 | 59 | ||
52 | return true; | 60 | return true; |
53 | } | 61 | } |
62 | #endif /* CONFIG_PARAVIRT */ | ||
54 | 63 | ||
55 | #include <asm-generic/qspinlock.h> | 64 | #include <asm-generic/qspinlock.h> |
56 | 65 | ||
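The rewritten virt_spin_lock() loop above is a test-and-test-and-set: waiters spin on a plain atomic_read() and only attempt the cmpxchg once the lock looks free, instead of retrying cmpxchg back-to-back as the old code did, so the contended cacheline can stay shared among the spinners. A rough userspace analogue using C11 atomics (type and function names here are illustrative, not the kernel's):

#include <stdatomic.h>

struct tas_lock { atomic_int val; };

/* Test-and-test-and-set: read until the lock looks free, then claim it
 * with a compare-and-swap. */
static void tas_lock_acquire(struct tas_lock *l)
{
        int expected;

        do {
                while (atomic_load_explicit(&l->val, memory_order_relaxed) != 0)
                        ;       /* spin read-only; cacheline stays shared */
                expected = 0;
        } while (!atomic_compare_exchange_weak_explicit(&l->val, &expected, 1,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));
}

static void tas_lock_release(struct tas_lock *l)
{
        atomic_store_explicit(&l->val, 0, memory_order_release);
}

Falling back to an unfair lock of this kind is the point of the change: a queued (fair) lock hands ownership to a specific next waiter, so a vCPU preempted by the hypervisor while in that queue stalls everyone behind it.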
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 83aea8055119..4c20dd333412 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h | |||
@@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc) | |||
336 | return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); | 336 | return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); |
337 | } | 337 | } |
338 | 338 | ||
339 | static inline int | 339 | static inline long |
340 | HYPERVISOR_memory_op(unsigned int cmd, void *arg) | 340 | HYPERVISOR_memory_op(unsigned int cmd, void *arg) |
341 | { | 341 | { |
342 | return _hypercall2(int, memory_op, cmd, arg); | 342 | return _hypercall2(long, memory_op, cmd, arg); |
343 | } | 343 | } |
344 | 344 | ||
345 | static inline int | 345 | static inline int |
diff --git a/arch/x86/include/uapi/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h index b0ae1c4dc791..217909b4d6f5 100644 --- a/arch/x86/include/uapi/asm/bitsperlong.h +++ b/arch/x86/include/uapi/asm/bitsperlong.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __ASM_X86_BITSPERLONG_H | 1 | #ifndef __ASM_X86_BITSPERLONG_H |
2 | #define __ASM_X86_BITSPERLONG_H | 2 | #define __ASM_X86_BITSPERLONG_H |
3 | 3 | ||
4 | #ifdef __x86_64__ | 4 | #if defined(__x86_64__) && !defined(__ILP32__) |
5 | # define __BITS_PER_LONG 64 | 5 | # define __BITS_PER_LONG 64 |
6 | #else | 6 | #else |
7 | # define __BITS_PER_LONG 32 | 7 | # define __BITS_PER_LONG 32 |
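The __ILP32__ test added above accounts for the x32 ABI, which defines __x86_64__ but uses 32-bit longs and pointers; without it, x32 userspace would see __BITS_PER_LONG as 64. A compile-time sketch of the same distinction (the macro name below is made up; build with gcc -m64, -m32 or -mx32 to compare):

/* Mirror of the header logic: on x32, __x86_64__ is defined but long is
 * 32 bits wide, which __ILP32__ signals. */
#if defined(__x86_64__) && !defined(__ILP32__)
# define MY_BITS_PER_LONG 64
#else
# define MY_BITS_PER_LONG 32
#endif

_Static_assert(MY_BITS_PER_LONG == 8 * sizeof(long), "ABI mismatch");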
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c42827eb86cf..25f909362b7a 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -338,10 +338,15 @@ done: | |||
338 | 338 | ||
339 | static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr) | 339 | static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr) |
340 | { | 340 | { |
341 | unsigned long flags; | ||
342 | |||
341 | if (instr[0] != 0x90) | 343 | if (instr[0] != 0x90) |
342 | return; | 344 | return; |
343 | 345 | ||
346 | local_irq_save(flags); | ||
344 | add_nops(instr + (a->instrlen - a->padlen), a->padlen); | 347 | add_nops(instr + (a->instrlen - a->padlen), a->padlen); |
348 | sync_core(); | ||
349 | local_irq_restore(flags); | ||
345 | 350 | ||
346 | DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ", | 351 | DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ", |
347 | instr, a->instrlen - a->padlen, a->padlen); | 352 | instr, a->instrlen - a->padlen, a->padlen); |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 3ca3e46aa405..24e94ce454e2 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -336,6 +336,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) | |||
336 | apic_write(APIC_LVTT, lvtt_value); | 336 | apic_write(APIC_LVTT, lvtt_value); |
337 | 337 | ||
338 | if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) { | 338 | if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) { |
339 | /* | ||
340 | * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode, | ||
341 | * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized. | ||
342 | * According to Intel, MFENCE can do the serialization here. | ||
343 | */ | ||
344 | asm volatile("mfence" : : : "memory"); | ||
345 | |||
339 | printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); | 346 | printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); |
340 | return; | 347 | return; |
341 | } | 348 | } |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 38a76f826530..5c60bb162622 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -2522,6 +2522,7 @@ void __init setup_ioapic_dest(void) | |||
2522 | int pin, ioapic, irq, irq_entry; | 2522 | int pin, ioapic, irq, irq_entry; |
2523 | const struct cpumask *mask; | 2523 | const struct cpumask *mask; |
2524 | struct irq_data *idata; | 2524 | struct irq_data *idata; |
2525 | struct irq_chip *chip; | ||
2525 | 2526 | ||
2526 | if (skip_ioapic_setup == 1) | 2527 | if (skip_ioapic_setup == 1) |
2527 | return; | 2528 | return; |
@@ -2545,9 +2546,9 @@ void __init setup_ioapic_dest(void) | |||
2545 | else | 2546 | else |
2546 | mask = apic->target_cpus(); | 2547 | mask = apic->target_cpus(); |
2547 | 2548 | ||
2548 | irq_set_affinity(irq, mask); | 2549 | chip = irq_data_get_irq_chip(idata); |
2550 | chip->irq_set_affinity(idata, mask, false); | ||
2549 | } | 2551 | } |
2550 | |||
2551 | } | 2552 | } |
2552 | #endif | 2553 | #endif |
2553 | 2554 | ||
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 1bbd0fe2c806..836d11b92811 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -489,10 +489,8 @@ static int apic_set_affinity(struct irq_data *irq_data, | |||
489 | 489 | ||
490 | err = assign_irq_vector(irq, data, dest); | 490 | err = assign_irq_vector(irq, data, dest); |
491 | if (err) { | 491 | if (err) { |
492 | struct irq_data *top = irq_get_irq_data(irq); | ||
493 | |||
494 | if (assign_irq_vector(irq, data, | 492 | if (assign_irq_vector(irq, data, |
495 | irq_data_get_affinity_mask(top))) | 493 | irq_data_get_affinity_mask(irq_data))) |
496 | pr_err("Failed to recover vector for irq %d\n", irq); | 494 | pr_err("Failed to recover vector for irq %d\n", irq); |
497 | return err; | 495 | return err; |
498 | } | 496 | } |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 07ce52c22ec8..de22ea7ff82f 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -1110,10 +1110,10 @@ void print_cpu_info(struct cpuinfo_x86 *c) | |||
1110 | else | 1110 | else |
1111 | printk(KERN_CONT "%d86", c->x86); | 1111 | printk(KERN_CONT "%d86", c->x86); |
1112 | 1112 | ||
1113 | printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model); | 1113 | printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model); |
1114 | 1114 | ||
1115 | if (c->x86_mask || c->cpuid_level >= 0) | 1115 | if (c->x86_mask || c->cpuid_level >= 0) |
1116 | printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask); | 1116 | printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask); |
1117 | else | 1117 | else |
1118 | printk(KERN_CONT ")\n"); | 1118 | printk(KERN_CONT ")\n"); |
1119 | 1119 | ||
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 381c8b9b3a33..20e242ea1bc4 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c | |||
@@ -34,11 +34,10 @@ | |||
34 | struct ms_hyperv_info ms_hyperv; | 34 | struct ms_hyperv_info ms_hyperv; |
35 | EXPORT_SYMBOL_GPL(ms_hyperv); | 35 | EXPORT_SYMBOL_GPL(ms_hyperv); |
36 | 36 | ||
37 | static void (*hv_kexec_handler)(void); | ||
38 | static void (*hv_crash_handler)(struct pt_regs *regs); | ||
39 | |||
40 | #if IS_ENABLED(CONFIG_HYPERV) | 37 | #if IS_ENABLED(CONFIG_HYPERV) |
41 | static void (*vmbus_handler)(void); | 38 | static void (*vmbus_handler)(void); |
39 | static void (*hv_kexec_handler)(void); | ||
40 | static void (*hv_crash_handler)(struct pt_regs *regs); | ||
42 | 41 | ||
43 | void hyperv_vector_handler(struct pt_regs *regs) | 42 | void hyperv_vector_handler(struct pt_regs *regs) |
44 | { | 43 | { |
@@ -96,8 +95,8 @@ void hv_remove_crash_handler(void) | |||
96 | hv_crash_handler = NULL; | 95 | hv_crash_handler = NULL; |
97 | } | 96 | } |
98 | EXPORT_SYMBOL_GPL(hv_remove_crash_handler); | 97 | EXPORT_SYMBOL_GPL(hv_remove_crash_handler); |
99 | #endif | ||
100 | 98 | ||
99 | #ifdef CONFIG_KEXEC_CORE | ||
101 | static void hv_machine_shutdown(void) | 100 | static void hv_machine_shutdown(void) |
102 | { | 101 | { |
103 | if (kexec_in_progress && hv_kexec_handler) | 102 | if (kexec_in_progress && hv_kexec_handler) |
@@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs) | |||
111 | hv_crash_handler(regs); | 110 | hv_crash_handler(regs); |
112 | native_machine_crash_shutdown(regs); | 111 | native_machine_crash_shutdown(regs); |
113 | } | 112 | } |
114 | 113 | #endif /* CONFIG_KEXEC_CORE */ | |
114 | #endif /* CONFIG_HYPERV */ | ||
115 | 115 | ||
116 | static uint32_t __init ms_hyperv_platform(void) | 116 | static uint32_t __init ms_hyperv_platform(void) |
117 | { | 117 | { |
@@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void) | |||
186 | no_timer_check = 1; | 186 | no_timer_check = 1; |
187 | #endif | 187 | #endif |
188 | 188 | ||
189 | #if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE) | ||
189 | machine_ops.shutdown = hv_machine_shutdown; | 190 | machine_ops.shutdown = hv_machine_shutdown; |
190 | machine_ops.crash_shutdown = hv_machine_crash_shutdown; | 191 | machine_ops.crash_shutdown = hv_machine_crash_shutdown; |
192 | #endif | ||
191 | mark_tsc_unstable("running on Hyper-V"); | 193 | mark_tsc_unstable("running on Hyper-V"); |
192 | } | 194 | } |
193 | 195 | ||
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 5edf6d868fc1..165be83a7fa4 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -47,6 +47,7 @@ enum extra_reg_type { | |||
47 | EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ | 47 | EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ |
48 | EXTRA_REG_LBR = 2, /* lbr_select */ | 48 | EXTRA_REG_LBR = 2, /* lbr_select */ |
49 | EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */ | 49 | EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */ |
50 | EXTRA_REG_FE = 4, /* fe_* */ | ||
50 | 51 | ||
51 | EXTRA_REG_MAX /* number of entries needed */ | 52 | EXTRA_REG_MAX /* number of entries needed */ |
52 | }; | 53 | }; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index cd9b6d0b10bf..f63360be2238 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -205,6 +205,11 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { | |||
205 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), | 205 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), |
206 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), | 206 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), |
207 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 207 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), |
208 | /* | ||
209 | * Note the low 8 bits eventsel code is not a continuous field, containing | ||
210 | * some #GPing bits. These are masked out. | ||
211 | */ | ||
212 | INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), | ||
208 | EVENT_EXTRA_END | 213 | EVENT_EXTRA_END |
209 | }; | 214 | }; |
210 | 215 | ||
@@ -250,7 +255,7 @@ struct event_constraint intel_bdw_event_constraints[] = { | |||
250 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 255 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
251 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 256 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ |
252 | INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ | 257 | INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ |
253 | INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ | 258 | INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ |
254 | EVENT_CONSTRAINT_END | 259 | EVENT_CONSTRAINT_END |
255 | }; | 260 | }; |
256 | 261 | ||
@@ -2316,9 +2321,12 @@ static struct event_constraint * | |||
2316 | intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 2321 | intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, |
2317 | struct perf_event *event) | 2322 | struct perf_event *event) |
2318 | { | 2323 | { |
2319 | struct event_constraint *c1 = cpuc->event_constraint[idx]; | 2324 | struct event_constraint *c1 = NULL; |
2320 | struct event_constraint *c2; | 2325 | struct event_constraint *c2; |
2321 | 2326 | ||
2327 | if (idx >= 0) /* fake does < 0 */ | ||
2328 | c1 = cpuc->event_constraint[idx]; | ||
2329 | |||
2322 | /* | 2330 | /* |
2323 | * first time only | 2331 | * first time only |
2324 | * - static constraint: no change across incremental scheduling calls | 2332 | * - static constraint: no change across incremental scheduling calls |
@@ -2888,6 +2896,8 @@ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); | |||
2888 | 2896 | ||
2889 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); | 2897 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); |
2890 | 2898 | ||
2899 | PMU_FORMAT_ATTR(frontend, "config1:0-23"); | ||
2900 | |||
2891 | static struct attribute *intel_arch3_formats_attr[] = { | 2901 | static struct attribute *intel_arch3_formats_attr[] = { |
2892 | &format_attr_event.attr, | 2902 | &format_attr_event.attr, |
2893 | &format_attr_umask.attr, | 2903 | &format_attr_umask.attr, |
@@ -2904,6 +2914,11 @@ static struct attribute *intel_arch3_formats_attr[] = { | |||
2904 | NULL, | 2914 | NULL, |
2905 | }; | 2915 | }; |
2906 | 2916 | ||
2917 | static struct attribute *skl_format_attr[] = { | ||
2918 | &format_attr_frontend.attr, | ||
2919 | NULL, | ||
2920 | }; | ||
2921 | |||
2907 | static __initconst const struct x86_pmu core_pmu = { | 2922 | static __initconst const struct x86_pmu core_pmu = { |
2908 | .name = "core", | 2923 | .name = "core", |
2909 | .handle_irq = x86_pmu_handle_irq, | 2924 | .handle_irq = x86_pmu_handle_irq, |
@@ -3513,7 +3528,8 @@ __init int intel_pmu_init(void) | |||
3513 | 3528 | ||
3514 | x86_pmu.hw_config = hsw_hw_config; | 3529 | x86_pmu.hw_config = hsw_hw_config; |
3515 | x86_pmu.get_event_constraints = hsw_get_event_constraints; | 3530 | x86_pmu.get_event_constraints = hsw_get_event_constraints; |
3516 | x86_pmu.cpu_events = hsw_events_attrs; | 3531 | x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, |
3532 | skl_format_attr); | ||
3517 | WARN_ON(!x86_pmu.format_attrs); | 3533 | WARN_ON(!x86_pmu.format_attrs); |
3518 | x86_pmu.cpu_events = hsw_events_attrs; | 3534 | x86_pmu.cpu_events = hsw_events_attrs; |
3519 | pr_cont("Skylake events, "); | 3535 | pr_cont("Skylake events, "); |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c index 54690e885759..d1c0f254afbe 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_bts.c +++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c | |||
@@ -222,6 +222,7 @@ static void __bts_event_start(struct perf_event *event) | |||
222 | if (!buf || bts_buffer_is_full(buf, bts)) | 222 | if (!buf || bts_buffer_is_full(buf, bts)) |
223 | return; | 223 | return; |
224 | 224 | ||
225 | event->hw.itrace_started = 1; | ||
225 | event->hw.state = 0; | 226 | event->hw.state = 0; |
226 | 227 | ||
227 | if (!buf->snapshot) | 228 | if (!buf->snapshot) |
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c index 086b12eae794..f32ac13934f2 100644 --- a/arch/x86/kernel/cpu/perf_event_msr.c +++ b/arch/x86/kernel/cpu/perf_event_msr.c | |||
@@ -10,12 +10,12 @@ enum perf_msr_id { | |||
10 | PERF_MSR_EVENT_MAX, | 10 | PERF_MSR_EVENT_MAX, |
11 | }; | 11 | }; |
12 | 12 | ||
13 | bool test_aperfmperf(int idx) | 13 | static bool test_aperfmperf(int idx) |
14 | { | 14 | { |
15 | return boot_cpu_has(X86_FEATURE_APERFMPERF); | 15 | return boot_cpu_has(X86_FEATURE_APERFMPERF); |
16 | } | 16 | } |
17 | 17 | ||
18 | bool test_intel(int idx) | 18 | static bool test_intel(int idx) |
19 | { | 19 | { |
20 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || | 20 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || |
21 | boot_cpu_data.x86 != 6) | 21 | boot_cpu_data.x86 != 6) |
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 3d423a101fae..608fb26c7254 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
37 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, | 37 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, |
38 | { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, | 38 | { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, |
39 | { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, | 39 | { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, |
40 | { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, | 40 | { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 }, |
41 | { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, | 41 | { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, |
42 | { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, | 42 | { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, |
43 | { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, | 43 | { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, |
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index e068d6683dba..74ca2fe7a0b3 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs) | |||
185 | } | 185 | } |
186 | 186 | ||
187 | #ifdef CONFIG_KEXEC_FILE | 187 | #ifdef CONFIG_KEXEC_FILE |
188 | static int get_nr_ram_ranges_callback(unsigned long start_pfn, | 188 | static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg) |
189 | unsigned long nr_pfn, void *arg) | ||
190 | { | 189 | { |
191 | int *nr_ranges = arg; | 190 | unsigned int *nr_ranges = arg; |
192 | 191 | ||
193 | (*nr_ranges)++; | 192 | (*nr_ranges)++; |
194 | return 0; | 193 | return 0; |
@@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced, | |||
214 | 213 | ||
215 | ced->image = image; | 214 | ced->image = image; |
216 | 215 | ||
217 | walk_system_ram_range(0, -1, &nr_ranges, | 216 | walk_system_ram_res(0, -1, &nr_ranges, |
218 | get_nr_ram_ranges_callback); | 217 | get_nr_ram_ranges_callback); |
219 | 218 | ||
220 | ced->max_nr_ranges = nr_ranges; | 219 | ced->max_nr_ranges = nr_ranges; |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index c80cf6699678..38da8f29a9c8 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -68,11 +68,10 @@ static inline void *current_stack(void) | |||
68 | return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); | 68 | return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); |
69 | } | 69 | } |
70 | 70 | ||
71 | static inline int | 71 | static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) |
72 | execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | ||
73 | { | 72 | { |
74 | struct irq_stack *curstk, *irqstk; | 73 | struct irq_stack *curstk, *irqstk; |
75 | u32 *isp, *prev_esp, arg1, arg2; | 74 | u32 *isp, *prev_esp, arg1; |
76 | 75 | ||
77 | curstk = (struct irq_stack *) current_stack(); | 76 | curstk = (struct irq_stack *) current_stack(); |
78 | irqstk = __this_cpu_read(hardirq_stack); | 77 | irqstk = __this_cpu_read(hardirq_stack); |
@@ -98,8 +97,8 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
98 | asm volatile("xchgl %%ebx,%%esp \n" | 97 | asm volatile("xchgl %%ebx,%%esp \n" |
99 | "call *%%edi \n" | 98 | "call *%%edi \n" |
100 | "movl %%ebx,%%esp \n" | 99 | "movl %%ebx,%%esp \n" |
101 | : "=a" (arg1), "=d" (arg2), "=b" (isp) | 100 | : "=a" (arg1), "=b" (isp) |
102 | : "0" (irq), "1" (desc), "2" (isp), | 101 | : "0" (desc), "1" (isp), |
103 | "D" (desc->handle_irq) | 102 | "D" (desc->handle_irq) |
104 | : "memory", "cc", "ecx"); | 103 | : "memory", "cc", "ecx"); |
105 | return 1; | 104 | return 1; |
@@ -150,19 +149,15 @@ void do_softirq_own_stack(void) | |||
150 | 149 | ||
151 | bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) | 150 | bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) |
152 | { | 151 | { |
153 | unsigned int irq; | 152 | int overflow = check_stack_overflow(); |
154 | int overflow; | ||
155 | |||
156 | overflow = check_stack_overflow(); | ||
157 | 153 | ||
158 | if (IS_ERR_OR_NULL(desc)) | 154 | if (IS_ERR_OR_NULL(desc)) |
159 | return false; | 155 | return false; |
160 | 156 | ||
161 | irq = irq_desc_get_irq(desc); | 157 | if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) { |
162 | if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) { | ||
163 | if (unlikely(overflow)) | 158 | if (unlikely(overflow)) |
164 | print_stack_overflow(); | 159 | print_stack_overflow(); |
165 | generic_handle_irq_desc(irq, desc); | 160 | generic_handle_irq_desc(desc); |
166 | } | 161 | } |
167 | 162 | ||
168 | return true; | 163 | return true; |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index ff16ccb918f2..c767cf2bc80a 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -75,6 +75,6 @@ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) | |||
75 | if (unlikely(IS_ERR_OR_NULL(desc))) | 75 | if (unlikely(IS_ERR_OR_NULL(desc))) |
76 | return false; | 76 | return false; |
77 | 77 | ||
78 | generic_handle_irq_desc(irq_desc_get_irq(desc), desc); | 78 | generic_handle_irq_desc(desc); |
79 | return true; | 79 | return true; |
80 | } | 80 | } |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 2bcc0525f1c1..6acc9dd91f36 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -58,7 +58,7 @@ static struct ldt_struct *alloc_ldt_struct(int size) | |||
58 | if (alloc_size > PAGE_SIZE) | 58 | if (alloc_size > PAGE_SIZE) |
59 | new_ldt->entries = vzalloc(alloc_size); | 59 | new_ldt->entries = vzalloc(alloc_size); |
60 | else | 60 | else |
61 | new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); | 61 | new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL); |
62 | 62 | ||
63 | if (!new_ldt->entries) { | 63 | if (!new_ldt->entries) { |
64 | kfree(new_ldt); | 64 | kfree(new_ldt); |
@@ -95,7 +95,7 @@ static void free_ldt_struct(struct ldt_struct *ldt) | |||
95 | if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) | 95 | if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) |
96 | vfree(ldt->entries); | 96 | vfree(ldt->entries); |
97 | else | 97 | else |
98 | kfree(ldt->entries); | 98 | free_page((unsigned long)ldt->entries); |
99 | kfree(ldt); | 99 | kfree(ldt); |
100 | } | 100 | } |
101 | 101 | ||
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index f68e48f5f6c2..c2130aef3f9d 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -41,10 +41,18 @@ | |||
41 | #include <asm/timer.h> | 41 | #include <asm/timer.h> |
42 | #include <asm/special_insns.h> | 42 | #include <asm/special_insns.h> |
43 | 43 | ||
44 | /* nop stub */ | 44 | /* |
45 | void _paravirt_nop(void) | 45 | * nop stub, which must not clobber anything *including the stack* to |
46 | { | 46 | * avoid confusing the entry prologues. |
47 | } | 47 | */ |
48 | extern void _paravirt_nop(void); | ||
49 | asm (".pushsection .entry.text, \"ax\"\n" | ||
50 | ".global _paravirt_nop\n" | ||
51 | "_paravirt_nop:\n\t" | ||
52 | "ret\n\t" | ||
53 | ".size _paravirt_nop, . - _paravirt_nop\n\t" | ||
54 | ".type _paravirt_nop, @function\n\t" | ||
55 | ".popsection"); | ||
48 | 56 | ||
49 | /* identity function, which can be inlined */ | 57 | /* identity function, which can be inlined */ |
50 | u32 _paravirt_ident_32(u32 x) | 58 | u32 _paravirt_ident_32(u32 x) |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 84b8ef82a159..1b55de1267cf 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -131,8 +131,8 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
131 | 131 | ||
132 | bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) | 132 | bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) |
133 | { | 133 | { |
134 | *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp); | ||
135 | *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); | 134 | *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); |
135 | *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp); | ||
136 | 136 | ||
137 | if (!*dev) | 137 | if (!*dev) |
138 | *dev = &x86_dma_fallback_dev; | 138 | *dev = &x86_dma_fallback_dev; |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 6d0e62ae8516..39e585a554b7 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) | |||
506 | return randomize_range(mm->brk, range_end, 0) ? : mm->brk; | 506 | return randomize_range(mm->brk, range_end, 0) ? : mm->brk; |
507 | } | 507 | } |
508 | 508 | ||
509 | /* | ||
510 | * Called from fs/proc with a reference on @p to find the function | ||
511 | * which called into schedule(). This needs to be done carefully | ||
512 | * because the task might wake up and we might look at a stack | ||
513 | * changing under us. | ||
514 | */ | ||
515 | unsigned long get_wchan(struct task_struct *p) | ||
516 | { | ||
517 | unsigned long start, bottom, top, sp, fp, ip; | ||
518 | int count = 0; | ||
519 | |||
520 | if (!p || p == current || p->state == TASK_RUNNING) | ||
521 | return 0; | ||
522 | |||
523 | start = (unsigned long)task_stack_page(p); | ||
524 | if (!start) | ||
525 | return 0; | ||
526 | |||
527 | /* | ||
528 | * Layout of the stack page: | ||
529 | * | ||
530 | * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long) | ||
531 | * PADDING | ||
532 | * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING | ||
533 | * stack | ||
534 | * ----------- bottom = start + sizeof(thread_info) | ||
535 | * thread_info | ||
536 | * ----------- start | ||
537 | * | ||
538 | * The tasks stack pointer points at the location where the | ||
539 | * framepointer is stored. The data on the stack is: | ||
540 | * ... IP FP ... IP FP | ||
541 | * | ||
542 | * We need to read FP and IP, so we need to adjust the upper | ||
543 | * bound by another unsigned long. | ||
544 | */ | ||
545 | top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; | ||
546 | top -= 2 * sizeof(unsigned long); | ||
547 | bottom = start + sizeof(struct thread_info); | ||
548 | |||
549 | sp = READ_ONCE(p->thread.sp); | ||
550 | if (sp < bottom || sp > top) | ||
551 | return 0; | ||
552 | |||
553 | fp = READ_ONCE(*(unsigned long *)sp); | ||
554 | do { | ||
555 | if (fp < bottom || fp > top) | ||
556 | return 0; | ||
557 | ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long))); | ||
558 | if (!in_sched_functions(ip)) | ||
559 | return ip; | ||
560 | fp = READ_ONCE(*(unsigned long *)fp); | ||
561 | } while (count++ < 16 && p->state != TASK_RUNNING); | ||
562 | return 0; | ||
563 | } | ||
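The consolidated get_wchan() above walks the sleeping task's frame-pointer chain to report where it blocked; the result is what surfaces, symbolized, in /proc/<pid>/wchan and in ps -o wchan. A tiny illustrative reader of that interface (error handling kept minimal; pass the PID of a sleeping task):

#include <stdio.h>

int main(int argc, char **argv)
{
        char path[64], buf[128];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/wchan", argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("wchan: %s\n", buf);     /* symbol name, or "0" if running */
        fclose(f);
        return 0;
}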
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index c13df2c735f8..737527b40e5b 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
324 | 324 | ||
325 | return prev_p; | 325 | return prev_p; |
326 | } | 326 | } |
327 | |||
328 | #define top_esp (THREAD_SIZE - sizeof(unsigned long)) | ||
329 | #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) | ||
330 | |||
331 | unsigned long get_wchan(struct task_struct *p) | ||
332 | { | ||
333 | unsigned long bp, sp, ip; | ||
334 | unsigned long stack_page; | ||
335 | int count = 0; | ||
336 | if (!p || p == current || p->state == TASK_RUNNING) | ||
337 | return 0; | ||
338 | stack_page = (unsigned long)task_stack_page(p); | ||
339 | sp = p->thread.sp; | ||
340 | if (!stack_page || sp < stack_page || sp > top_esp+stack_page) | ||
341 | return 0; | ||
342 | /* include/asm-i386/system.h:switch_to() pushes bp last. */ | ||
343 | bp = *(unsigned long *) sp; | ||
344 | do { | ||
345 | if (bp < stack_page || bp > top_ebp+stack_page) | ||
346 | return 0; | ||
347 | ip = *(unsigned long *) (bp+4); | ||
348 | if (!in_sched_functions(ip)) | ||
349 | return ip; | ||
350 | bp = *(unsigned long *) bp; | ||
351 | } while (count++ < 16); | ||
352 | return 0; | ||
353 | } | ||
354 | |||
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3c1bbcf12924..b35921a670b2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -499,30 +499,6 @@ void set_personality_ia32(bool x32) | |||
499 | } | 499 | } |
500 | EXPORT_SYMBOL_GPL(set_personality_ia32); | 500 | EXPORT_SYMBOL_GPL(set_personality_ia32); |
501 | 501 | ||
502 | unsigned long get_wchan(struct task_struct *p) | ||
503 | { | ||
504 | unsigned long stack; | ||
505 | u64 fp, ip; | ||
506 | int count = 0; | ||
507 | |||
508 | if (!p || p == current || p->state == TASK_RUNNING) | ||
509 | return 0; | ||
510 | stack = (unsigned long)task_stack_page(p); | ||
511 | if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) | ||
512 | return 0; | ||
513 | fp = *(u64 *)(p->thread.sp); | ||
514 | do { | ||
515 | if (fp < (unsigned long)stack || | ||
516 | fp >= (unsigned long)stack+THREAD_SIZE) | ||
517 | return 0; | ||
518 | ip = *(u64 *)(fp+8); | ||
519 | if (!in_sched_functions(ip)) | ||
520 | return ip; | ||
521 | fp = *(u64 *)fp; | ||
522 | } while (count++ < 16); | ||
523 | return 0; | ||
524 | } | ||
525 | |||
526 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | 502 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) |
527 | { | 503 | { |
528 | int ret = 0; | 504 | int ret = 0; |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index c8d52cb4cb6e..c3f7602cd038 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/hypervisor.h> | 21 | #include <asm/hypervisor.h> |
22 | #include <asm/nmi.h> | 22 | #include <asm/nmi.h> |
23 | #include <asm/x86_init.h> | 23 | #include <asm/x86_init.h> |
24 | #include <asm/geode.h> | ||
24 | 25 | ||
25 | unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ | 26 | unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ |
26 | EXPORT_SYMBOL(cpu_khz); | 27 | EXPORT_SYMBOL(cpu_khz); |
@@ -1013,15 +1014,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable); | |||
1013 | 1014 | ||
1014 | static void __init check_system_tsc_reliable(void) | 1015 | static void __init check_system_tsc_reliable(void) |
1015 | { | 1016 | { |
1016 | #ifdef CONFIG_MGEODE_LX | 1017 | #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC) |
1017 | /* RTSC counts during suspend */ | 1018 | if (is_geode_lx()) { |
1019 | /* RTSC counts during suspend */ | ||
1018 | #define RTSC_SUSP 0x100 | 1020 | #define RTSC_SUSP 0x100 |
1019 | unsigned long res_low, res_high; | 1021 | unsigned long res_low, res_high; |
1020 | 1022 | ||
1021 | rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); | 1023 | rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); |
1022 | /* Geode_LX - the OLPC CPU has a very reliable TSC */ | 1024 | /* Geode_LX - the OLPC CPU has a very reliable TSC */ |
1023 | if (res_low & RTSC_SUSP) | 1025 | if (res_low & RTSC_SUSP) |
1024 | tsc_clocksource_reliable = 1; | 1026 | tsc_clocksource_reliable = 1; |
1027 | } | ||
1025 | #endif | 1028 | #endif |
1026 | if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) | 1029 | if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) |
1027 | tsc_clocksource_reliable = 1; | 1030 | tsc_clocksource_reliable = 1; |
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index abd8b856bd2b..524619351961 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/audit.h> | 45 | #include <linux/audit.h> |
46 | #include <linux/stddef.h> | 46 | #include <linux/stddef.h> |
47 | #include <linux/slab.h> | 47 | #include <linux/slab.h> |
48 | #include <linux/security.h> | ||
48 | 49 | ||
49 | #include <asm/uaccess.h> | 50 | #include <asm/uaccess.h> |
50 | #include <asm/io.h> | 51 | #include <asm/io.h> |
@@ -232,6 +233,32 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) | |||
232 | struct pt_regs *regs = current_pt_regs(); | 233 | struct pt_regs *regs = current_pt_regs(); |
233 | unsigned long err = 0; | 234 | unsigned long err = 0; |
234 | 235 | ||
236 | err = security_mmap_addr(0); | ||
237 | if (err) { | ||
238 | /* | ||
239 | * vm86 cannot virtualize the address space, so vm86 users | ||
240 | * need to manage the low 1MB themselves using mmap. Given | ||
241 | * that BIOS places important data in the first page, vm86 | ||
242 | * is essentially useless if mmap_min_addr != 0. DOSEMU, | ||
243 | * for example, won't even bother trying to use vm86 if it | ||
244 | * can't map a page at virtual address 0. | ||
245 | * | ||
246 | * To reduce the available kernel attack surface, simply | ||
247 | * disallow vm86(old) for users who cannot mmap at va 0. | ||
248 | * | ||
249 | * The implementation of security_mmap_addr will allow | ||
250 | * suitably privileged users to map va 0 even if | ||
251 | * vm.mmap_min_addr is set above 0, and we want this | ||
252 | * behavior for vm86 as well, as it ensures that legacy | ||
253 | * tools like vbetool will not fail just because of | ||
254 | * vm.mmap_min_addr. | ||
255 | */ | ||
256 | pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n", | ||
257 | current->comm, task_pid_nr(current), | ||
258 | from_kuid_munged(&init_user_ns, current_uid())); | ||
259 | return -EPERM; | ||
260 | } | ||
261 | |||
235 | if (!vm86) { | 262 | if (!vm86) { |
236 | if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL))) | 263 | if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL))) |
237 | return -ENOMEM; | 264 | return -ENOMEM; |
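As the new comment in do_sys_vm86() explains, vm86 is only useful if the caller can also map the real-mode low memory at virtual address 0, and the same security_mmap_addr() policy governs both. A small userspace probe of that prerequisite (purely illustrative; emulators such as DOSEMU perform an equivalent mapping during setup):

/* Try to map page 0, as a vm86-based DOS emulator must; this fails with
 * EPERM for unprivileged users when vm.mmap_min_addr is above 0. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        void *p = mmap((void *)0, 0x1000, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap of page 0 (check vm.mmap_min_addr)");
                return 1;
        }
        printf("page 0 mapped at %p\n", p);
        return 0;
}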
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b372a7557c16..9da95b9daf8d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) | |||
2418 | u64 val, cr0, cr4; | 2418 | u64 val, cr0, cr4; |
2419 | u32 base3; | 2419 | u32 base3; |
2420 | u16 selector; | 2420 | u16 selector; |
2421 | int i; | 2421 | int i, r; |
2422 | 2422 | ||
2423 | for (i = 0; i < 16; i++) | 2423 | for (i = 0; i < 16; i++) |
2424 | *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8); | 2424 | *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8); |
@@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) | |||
2460 | dt.address = GET_SMSTATE(u64, smbase, 0x7e68); | 2460 | dt.address = GET_SMSTATE(u64, smbase, 0x7e68); |
2461 | ctxt->ops->set_gdt(ctxt, &dt); | 2461 | ctxt->ops->set_gdt(ctxt, &dt); |
2462 | 2462 | ||
2463 | r = rsm_enter_protected_mode(ctxt, cr0, cr4); | ||
2464 | if (r != X86EMUL_CONTINUE) | ||
2465 | return r; | ||
2466 | |||
2463 | for (i = 0; i < 6; i++) { | 2467 | for (i = 0; i < 6; i++) { |
2464 | int r = rsm_load_seg_64(ctxt, smbase, i); | 2468 | r = rsm_load_seg_64(ctxt, smbase, i); |
2465 | if (r != X86EMUL_CONTINUE) | 2469 | if (r != X86EMUL_CONTINUE) |
2466 | return r; | 2470 | return r; |
2467 | } | 2471 | } |
2468 | 2472 | ||
2469 | return rsm_enter_protected_mode(ctxt, cr0, cr4); | 2473 | return X86EMUL_CONTINUE; |
2470 | } | 2474 | } |
2471 | 2475 | ||
2472 | static int em_rsm(struct x86_emulate_ctxt *ctxt) | 2476 | static int em_rsm(struct x86_emulate_ctxt *ctxt) |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 69088a1ba509..ff606f507913 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -3322,7 +3322,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) | |||
3322 | break; | 3322 | break; |
3323 | 3323 | ||
3324 | reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, | 3324 | reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, |
3325 | leaf); | 3325 | iterator.level); |
3326 | } | 3326 | } |
3327 | 3327 | ||
3328 | walk_shadow_page_lockless_end(vcpu); | 3328 | walk_shadow_page_lockless_end(vcpu); |
@@ -3614,7 +3614,7 @@ static void | |||
3614 | __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, | 3614 | __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, |
3615 | struct rsvd_bits_validate *rsvd_check, | 3615 | struct rsvd_bits_validate *rsvd_check, |
3616 | int maxphyaddr, int level, bool nx, bool gbpages, | 3616 | int maxphyaddr, int level, bool nx, bool gbpages, |
3617 | bool pse) | 3617 | bool pse, bool amd) |
3618 | { | 3618 | { |
3619 | u64 exb_bit_rsvd = 0; | 3619 | u64 exb_bit_rsvd = 0; |
3620 | u64 gbpages_bit_rsvd = 0; | 3620 | u64 gbpages_bit_rsvd = 0; |
@@ -3631,7 +3631,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, | |||
3631 | * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for | 3631 | * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for |
3632 | * leaf entries) on AMD CPUs only. | 3632 | * leaf entries) on AMD CPUs only. |
3633 | */ | 3633 | */ |
3634 | if (guest_cpuid_is_amd(vcpu)) | 3634 | if (amd) |
3635 | nonleaf_bit8_rsvd = rsvd_bits(8, 8); | 3635 | nonleaf_bit8_rsvd = rsvd_bits(8, 8); |
3636 | 3636 | ||
3637 | switch (level) { | 3637 | switch (level) { |
@@ -3699,7 +3699,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, | |||
3699 | __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, | 3699 | __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, |
3700 | cpuid_maxphyaddr(vcpu), context->root_level, | 3700 | cpuid_maxphyaddr(vcpu), context->root_level, |
3701 | context->nx, guest_cpuid_has_gbpages(vcpu), | 3701 | context->nx, guest_cpuid_has_gbpages(vcpu), |
3702 | is_pse(vcpu)); | 3702 | is_pse(vcpu), guest_cpuid_is_amd(vcpu)); |
3703 | } | 3703 | } |
3704 | 3704 | ||
3705 | static void | 3705 | static void |
@@ -3749,13 +3749,24 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, | |||
3749 | void | 3749 | void |
3750 | reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) | 3750 | reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) |
3751 | { | 3751 | { |
3752 | /* | ||
3753 | * Passing "true" to the last argument is okay; it adds a check | ||
3754 | * on bit 8 of the SPTEs which KVM doesn't use anyway. | ||
3755 | */ | ||
3752 | __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, | 3756 | __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, |
3753 | boot_cpu_data.x86_phys_bits, | 3757 | boot_cpu_data.x86_phys_bits, |
3754 | context->shadow_root_level, context->nx, | 3758 | context->shadow_root_level, context->nx, |
3755 | guest_cpuid_has_gbpages(vcpu), is_pse(vcpu)); | 3759 | guest_cpuid_has_gbpages(vcpu), is_pse(vcpu), |
3760 | true); | ||
3756 | } | 3761 | } |
3757 | EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); | 3762 | EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); |
3758 | 3763 | ||
3764 | static inline bool boot_cpu_is_amd(void) | ||
3765 | { | ||
3766 | WARN_ON_ONCE(!tdp_enabled); | ||
3767 | return shadow_x_mask == 0; | ||
3768 | } | ||
3769 | |||
3759 | /* | 3770 | /* |
3760 | * the direct page table on host, use as much mmu features as | 3771 | * the direct page table on host, use as much mmu features as |
3761 | * possible, however, kvm currently does not do execution-protection. | 3772 | * possible, however, kvm currently does not do execution-protection. |
@@ -3764,11 +3775,11 @@ static void | |||
3764 | reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, | 3775 | reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, |
3765 | struct kvm_mmu *context) | 3776 | struct kvm_mmu *context) |
3766 | { | 3777 | { |
3767 | if (guest_cpuid_is_amd(vcpu)) | 3778 | if (boot_cpu_is_amd()) |
3768 | __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, | 3779 | __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, |
3769 | boot_cpu_data.x86_phys_bits, | 3780 | boot_cpu_data.x86_phys_bits, |
3770 | context->shadow_root_level, false, | 3781 | context->shadow_root_level, false, |
3771 | cpu_has_gbpages, true); | 3782 | cpu_has_gbpages, true, true); |
3772 | else | 3783 | else |
3773 | __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, | 3784 | __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, |
3774 | boot_cpu_data.x86_phys_bits, | 3785 | boot_cpu_data.x86_phys_bits, |
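
A minimal userspace sketch of the reserved-bits idea the mmu.c hunks above parametrize: bit 8 of non-leaf entries is only reserved when the vendor is AMD, which is why the helper now takes an explicit amd flag instead of calling guest_cpuid_is_amd(). The rsvd_bits() helper below mirrors KVM's, while maxphyaddr, the amd flag and the example entry are assumptions chosen purely for illustration.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* rsvd_bits(s, e): mask with bits s..e (inclusive) set. */
    static uint64_t rsvd_bits(int s, int e)
    {
            return ((1ULL << (e - s + 1)) - 1) << s;
    }

    int main(void)
    {
            int maxphyaddr = 40;    /* assumed guest MAXPHYADDR, not queried */
            bool amd = true;        /* stands in for the new "amd" parameter */

            /* Address bits above MAXPHYADDR are always reserved. */
            uint64_t check = rsvd_bits(maxphyaddr, 51);

            /* Non-leaf PML4Es/PDPEs also reserve bit 8, but on AMD only. */
            if (amd)
                    check |= rsvd_bits(8, 8);

            uint64_t pte = (1ULL << 45) | (1ULL << 8);  /* example entry bits */
            printf("reserved bit violation: %s\n", (pte & check) ? "yes" : "no");
            return 0;
    }
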
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index fdb8cb63a6c0..2f9ed1ff0632 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -202,6 +202,7 @@ module_param(npt, int, S_IRUGO); | |||
202 | static int nested = true; | 202 | static int nested = true; |
203 | module_param(nested, int, S_IRUGO); | 203 | module_param(nested, int, S_IRUGO); |
204 | 204 | ||
205 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); | ||
205 | static void svm_flush_tlb(struct kvm_vcpu *vcpu); | 206 | static void svm_flush_tlb(struct kvm_vcpu *vcpu); |
206 | static void svm_complete_interrupts(struct vcpu_svm *svm); | 207 | static void svm_complete_interrupts(struct vcpu_svm *svm); |
207 | 208 | ||
@@ -513,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
513 | struct vcpu_svm *svm = to_svm(vcpu); | 514 | struct vcpu_svm *svm = to_svm(vcpu); |
514 | 515 | ||
515 | if (svm->vmcb->control.next_rip != 0) { | 516 | if (svm->vmcb->control.next_rip != 0) { |
516 | WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); | 517 | WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); |
517 | svm->next_rip = svm->vmcb->control.next_rip; | 518 | svm->next_rip = svm->vmcb->control.next_rip; |
518 | } | 519 | } |
519 | 520 | ||
@@ -865,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm) | |||
865 | set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); | 866 | set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); |
866 | } | 867 | } |
867 | 868 | ||
868 | #define MTRR_TYPE_UC_MINUS 7 | ||
869 | #define MTRR2PROTVAL_INVALID 0xff | ||
870 | |||
871 | static u8 mtrr2protval[8]; | ||
872 | |||
873 | static u8 fallback_mtrr_type(int mtrr) | ||
874 | { | ||
875 | /* | ||
876 | * WT and WP aren't always available in the host PAT. Treat | ||
877 | * them as UC and UC- respectively. Everything else should be | ||
878 | * there. | ||
879 | */ | ||
880 | switch (mtrr) | ||
881 | { | ||
882 | case MTRR_TYPE_WRTHROUGH: | ||
883 | return MTRR_TYPE_UNCACHABLE; | ||
884 | case MTRR_TYPE_WRPROT: | ||
885 | return MTRR_TYPE_UC_MINUS; | ||
886 | default: | ||
887 | BUG(); | ||
888 | } | ||
889 | } | ||
890 | |||
891 | static void build_mtrr2protval(void) | ||
892 | { | ||
893 | int i; | ||
894 | u64 pat; | ||
895 | |||
896 | for (i = 0; i < 8; i++) | ||
897 | mtrr2protval[i] = MTRR2PROTVAL_INVALID; | ||
898 | |||
899 | /* Ignore the invalid MTRR types. */ | ||
900 | mtrr2protval[2] = 0; | ||
901 | mtrr2protval[3] = 0; | ||
902 | |||
903 | /* | ||
904 | * Use host PAT value to figure out the mapping from guest MTRR | ||
905 | * values to nested page table PAT/PCD/PWT values. We do not | ||
906 | * want to change the host PAT value every time we enter the | ||
907 | * guest. | ||
908 | */ | ||
909 | rdmsrl(MSR_IA32_CR_PAT, pat); | ||
910 | for (i = 0; i < 8; i++) { | ||
911 | u8 mtrr = pat >> (8 * i); | ||
912 | |||
913 | if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID) | ||
914 | mtrr2protval[mtrr] = __cm_idx2pte(i); | ||
915 | } | ||
916 | |||
917 | for (i = 0; i < 8; i++) { | ||
918 | if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) { | ||
919 | u8 fallback = fallback_mtrr_type(i); | ||
920 | mtrr2protval[i] = mtrr2protval[fallback]; | ||
921 | BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID); | ||
922 | } | ||
923 | } | ||
924 | } | ||
925 | |||
926 | static __init int svm_hardware_setup(void) | 869 | static __init int svm_hardware_setup(void) |
927 | { | 870 | { |
928 | int cpu; | 871 | int cpu; |
@@ -989,7 +932,6 @@ static __init int svm_hardware_setup(void) | |||
989 | } else | 932 | } else |
990 | kvm_disable_tdp(); | 933 | kvm_disable_tdp(); |
991 | 934 | ||
992 | build_mtrr2protval(); | ||
993 | return 0; | 935 | return 0; |
994 | 936 | ||
995 | err: | 937 | err: |
@@ -1144,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) | |||
1144 | return target_tsc - tsc; | 1086 | return target_tsc - tsc; |
1145 | } | 1087 | } |
1146 | 1088 | ||
1147 | static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat) | ||
1148 | { | ||
1149 | struct kvm_vcpu *vcpu = &svm->vcpu; | ||
1150 | |||
1151 | /* Unlike Intel, AMD takes the guest's CR0.CD into account. | ||
1152 | * | ||
1153 | * AMD does not have IPAT. To emulate it for the case of guests | ||
1154 | * with no assigned devices, just set everything to WB. If guests | ||
1155 | * have assigned devices, however, we cannot force WB for RAM | ||
1156 | * pages only, so use the guest PAT directly. | ||
1157 | */ | ||
1158 | if (!kvm_arch_has_assigned_device(vcpu->kvm)) | ||
1159 | *g_pat = 0x0606060606060606; | ||
1160 | else | ||
1161 | *g_pat = vcpu->arch.pat; | ||
1162 | } | ||
1163 | |||
1164 | static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) | ||
1165 | { | ||
1166 | u8 mtrr; | ||
1167 | |||
1168 | /* | ||
1169 | * 1. MMIO: trust guest MTRR, so same as item 3. | ||
1170 | * 2. No passthrough: always map as WB, and force guest PAT to WB as well | ||
1171 | * 3. Passthrough: can't guarantee the result, try to trust guest. | ||
1172 | */ | ||
1173 | if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm)) | ||
1174 | return 0; | ||
1175 | |||
1176 | if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) && | ||
1177 | kvm_read_cr0(vcpu) & X86_CR0_CD) | ||
1178 | return _PAGE_NOCACHE; | ||
1179 | |||
1180 | mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn); | ||
1181 | return mtrr2protval[mtrr]; | ||
1182 | } | ||
1183 | |||
1184 | static void init_vmcb(struct vcpu_svm *svm, bool init_event) | 1089 | static void init_vmcb(struct vcpu_svm *svm, bool init_event) |
1185 | { | 1090 | { |
1186 | struct vmcb_control_area *control = &svm->vmcb->control; | 1091 | struct vmcb_control_area *control = &svm->vmcb->control; |
@@ -1263,7 +1168,8 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event) | |||
1263 | * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. | 1168 | * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. |
1264 | * It also updates the guest-visible cr0 value. | 1169 | * It also updates the guest-visible cr0 value. |
1265 | */ | 1170 | */ |
1266 | (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); | 1171 | svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); |
1172 | kvm_mmu_reset_context(&svm->vcpu); | ||
1267 | 1173 | ||
1268 | save->cr4 = X86_CR4_PAE; | 1174 | save->cr4 = X86_CR4_PAE; |
1269 | /* rdx = ?? */ | 1175 | /* rdx = ?? */ |
@@ -1276,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event) | |||
1276 | clr_cr_intercept(svm, INTERCEPT_CR3_READ); | 1182 | clr_cr_intercept(svm, INTERCEPT_CR3_READ); |
1277 | clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); | 1183 | clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); |
1278 | save->g_pat = svm->vcpu.arch.pat; | 1184 | save->g_pat = svm->vcpu.arch.pat; |
1279 | svm_set_guest_pat(svm, &save->g_pat); | ||
1280 | save->cr3 = 0; | 1185 | save->cr3 = 0; |
1281 | save->cr4 = 0; | 1186 | save->cr4 = 0; |
1282 | } | 1187 | } |
@@ -1671,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
1671 | 1576 | ||
1672 | if (!vcpu->fpu_active) | 1577 | if (!vcpu->fpu_active) |
1673 | cr0 |= X86_CR0_TS; | 1578 | cr0 |= X86_CR0_TS; |
1674 | 1579 | /* | |
1675 | /* These are emulated via page tables. */ | 1580 | * re-enable caching here because the QEMU bios |
1676 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); | 1581 | * does not do it - this results in some delay at |
1677 | 1582 | * reboot | |
1583 | */ | ||
1584 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) | ||
1585 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); | ||
1678 | svm->vmcb->save.cr0 = cr0; | 1586 | svm->vmcb->save.cr0 = cr0; |
1679 | mark_dirty(svm->vmcb, VMCB_CR); | 1587 | mark_dirty(svm->vmcb, VMCB_CR); |
1680 | update_cr0_intercept(svm); | 1588 | update_cr0_intercept(svm); |
@@ -3349,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
3349 | case MSR_VM_IGNNE: | 3257 | case MSR_VM_IGNNE: |
3350 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); | 3258 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); |
3351 | break; | 3259 | break; |
3352 | case MSR_IA32_CR_PAT: | ||
3353 | if (npt_enabled) { | ||
3354 | if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) | ||
3355 | return 1; | ||
3356 | vcpu->arch.pat = data; | ||
3357 | svm_set_guest_pat(svm, &svm->vmcb->save.g_pat); | ||
3358 | mark_dirty(svm->vmcb, VMCB_NPT); | ||
3359 | break; | ||
3360 | } | ||
3361 | /* fall through */ | ||
3362 | default: | 3260 | default: |
3363 | return kvm_set_msr_common(vcpu, msr); | 3261 | return kvm_set_msr_common(vcpu, msr); |
3364 | } | 3262 | } |
@@ -4193,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void) | |||
4193 | return true; | 4091 | return true; |
4194 | } | 4092 | } |
4195 | 4093 | ||
4094 | static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) | ||
4095 | { | ||
4096 | return 0; | ||
4097 | } | ||
4098 | |||
4196 | static void svm_cpuid_update(struct kvm_vcpu *vcpu) | 4099 | static void svm_cpuid_update(struct kvm_vcpu *vcpu) |
4197 | { | 4100 | { |
4198 | } | 4101 | } |
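
The deleted build_mtrr2protval() walked the host PAT MSR one byte per entry to build its lookup table before the whole scheme was dropped in favour of a zero svm_get_mt_mask(). The sketch below shows only that byte-wise decoding in userspace; the PAT value is hard-coded to the common power-on layout as an assumption rather than read with rdmsrl(), and no page-table encoding is derived from it.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Typical power-on layout: WB, WT, UC-, UC repeated (assumed). */
            uint64_t pat = 0x0007040600070406ULL;

            for (int i = 0; i < 8; i++) {
                    uint8_t type = pat >> (8 * i);  /* one memory type per byte */
                    printf("PAT entry %d -> type %#x\n", i, type);
            }
            return 0;
    }
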
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d01986832afc..6a8bc64566ab 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -4105,17 +4105,13 @@ static void seg_setup(int seg) | |||
4105 | static int alloc_apic_access_page(struct kvm *kvm) | 4105 | static int alloc_apic_access_page(struct kvm *kvm) |
4106 | { | 4106 | { |
4107 | struct page *page; | 4107 | struct page *page; |
4108 | struct kvm_userspace_memory_region kvm_userspace_mem; | ||
4109 | int r = 0; | 4108 | int r = 0; |
4110 | 4109 | ||
4111 | mutex_lock(&kvm->slots_lock); | 4110 | mutex_lock(&kvm->slots_lock); |
4112 | if (kvm->arch.apic_access_page_done) | 4111 | if (kvm->arch.apic_access_page_done) |
4113 | goto out; | 4112 | goto out; |
4114 | kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; | 4113 | r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, |
4115 | kvm_userspace_mem.flags = 0; | 4114 | APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); |
4116 | kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE; | ||
4117 | kvm_userspace_mem.memory_size = PAGE_SIZE; | ||
4118 | r = __x86_set_memory_region(kvm, &kvm_userspace_mem); | ||
4119 | if (r) | 4115 | if (r) |
4120 | goto out; | 4116 | goto out; |
4121 | 4117 | ||
@@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm) | |||
4140 | { | 4136 | { |
4141 | /* Called with kvm->slots_lock held. */ | 4137 | /* Called with kvm->slots_lock held. */ |
4142 | 4138 | ||
4143 | struct kvm_userspace_memory_region kvm_userspace_mem; | ||
4144 | int r = 0; | 4139 | int r = 0; |
4145 | 4140 | ||
4146 | BUG_ON(kvm->arch.ept_identity_pagetable_done); | 4141 | BUG_ON(kvm->arch.ept_identity_pagetable_done); |
4147 | 4142 | ||
4148 | kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; | 4143 | r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, |
4149 | kvm_userspace_mem.flags = 0; | 4144 | kvm->arch.ept_identity_map_addr, PAGE_SIZE); |
4150 | kvm_userspace_mem.guest_phys_addr = | ||
4151 | kvm->arch.ept_identity_map_addr; | ||
4152 | kvm_userspace_mem.memory_size = PAGE_SIZE; | ||
4153 | r = __x86_set_memory_region(kvm, &kvm_userspace_mem); | ||
4154 | 4145 | ||
4155 | return r; | 4146 | return r; |
4156 | } | 4147 | } |
@@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) | |||
4949 | static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) | 4940 | static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) |
4950 | { | 4941 | { |
4951 | int ret; | 4942 | int ret; |
4952 | struct kvm_userspace_memory_region tss_mem = { | ||
4953 | .slot = TSS_PRIVATE_MEMSLOT, | ||
4954 | .guest_phys_addr = addr, | ||
4955 | .memory_size = PAGE_SIZE * 3, | ||
4956 | .flags = 0, | ||
4957 | }; | ||
4958 | 4943 | ||
4959 | ret = x86_set_memory_region(kvm, &tss_mem); | 4944 | ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, |
4945 | PAGE_SIZE * 3); | ||
4960 | if (ret) | 4946 | if (ret) |
4961 | return ret; | 4947 | return ret; |
4962 | kvm->arch.tss_addr = addr; | 4948 | kvm->arch.tss_addr = addr; |
@@ -6064,6 +6050,8 @@ static __init int hardware_setup(void) | |||
6064 | memcpy(vmx_msr_bitmap_longmode_x2apic, | 6050 | memcpy(vmx_msr_bitmap_longmode_x2apic, |
6065 | vmx_msr_bitmap_longmode, PAGE_SIZE); | 6051 | vmx_msr_bitmap_longmode, PAGE_SIZE); |
6066 | 6052 | ||
6053 | set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ | ||
6054 | |||
6067 | if (enable_apicv) { | 6055 | if (enable_apicv) { |
6068 | for (msr = 0x800; msr <= 0x8ff; msr++) | 6056 | for (msr = 0x800; msr <= 0x8ff; msr++) |
6069 | vmx_disable_intercept_msr_read_x2apic(msr); | 6057 | vmx_disable_intercept_msr_read_x2apic(msr); |
@@ -8615,17 +8603,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) | |||
8615 | u64 ipat = 0; | 8603 | u64 ipat = 0; |
8616 | 8604 | ||
8617 | /* For VT-d and EPT combination | 8605 | /* For VT-d and EPT combination |
8618 | * 1. MMIO: guest may want to apply WC, trust it. | 8606 | * 1. MMIO: always map as UC |
8619 | * 2. EPT with VT-d: | 8607 | * 2. EPT with VT-d: |
8620 | * a. VT-d without snooping control feature: can't guarantee the | 8608 | * a. VT-d without snooping control feature: can't guarantee the |
8621 | * result, try to trust guest. So the same as item 1. | 8609 | * result, try to trust guest. |
8622 | * b. VT-d with snooping control feature: snooping control feature of | 8610 | * b. VT-d with snooping control feature: snooping control feature of |
8623 | * VT-d engine can guarantee the cache correctness. Just set it | 8611 | * VT-d engine can guarantee the cache correctness. Just set it |
8624 | * to WB to keep consistent with host. So the same as item 3. | 8612 | * to WB to keep consistent with host. So the same as item 3. |
8625 | * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep | 8613 | * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep |
8626 | * consistent with host MTRR | 8614 | * consistent with host MTRR |
8627 | */ | 8615 | */ |
8628 | if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) { | 8616 | if (is_mmio) { |
8617 | cache = MTRR_TYPE_UNCACHABLE; | ||
8618 | goto exit; | ||
8619 | } | ||
8620 | |||
8621 | if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { | ||
8629 | ipat = VMX_EPT_IPAT_BIT; | 8622 | ipat = VMX_EPT_IPAT_BIT; |
8630 | cache = MTRR_TYPE_WRBACK; | 8623 | cache = MTRR_TYPE_WRBACK; |
8631 | goto exit; | 8624 | goto exit; |
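
A hedged sketch of the decision order the reworked vmx_get_mt_mask() comment describes, with MMIO forced to UC before anything else is considered. The type codes and the IPAT bit position below are illustrative stand-ins rather than the VMX_EPT_* definitions, and the CR0.CD quirk handling is omitted.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { MT_UC = 0, MT_WB = 6 };          /* assumed type codes          */
    #define IPAT_BIT (1ULL << 6)            /* illustrative position only  */

    static uint64_t mt_mask(bool is_mmio, bool noncoherent_dma, int guest_mtrr)
    {
            if (is_mmio)
                    return MT_UC;                   /* 1. MMIO: always UC    */
            if (!noncoherent_dma)
                    return MT_WB | IPAT_BIT;        /* 2. no VT-d: force WB  */
            return guest_mtrr;                      /* 3. trust guest MTRR   */
    }

    int main(void)
    {
            printf("mmio     -> %#llx\n", (unsigned long long)mt_mask(true,  false, 0));
            printf("no dma   -> %#llx\n", (unsigned long long)mt_mask(false, false, 0));
            printf("passthru -> %#llx\n", (unsigned long long)mt_mask(false, true,  1));
            return 0;
    }
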
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a60bdbccff51..9a9a19830321 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -149,6 +149,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
149 | { "nmi_window", VCPU_STAT(nmi_window_exits) }, | 149 | { "nmi_window", VCPU_STAT(nmi_window_exits) }, |
150 | { "halt_exits", VCPU_STAT(halt_exits) }, | 150 | { "halt_exits", VCPU_STAT(halt_exits) }, |
151 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, | 151 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, |
152 | { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, | ||
152 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 153 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
153 | { "hypercalls", VCPU_STAT(hypercalls) }, | 154 | { "hypercalls", VCPU_STAT(hypercalls) }, |
154 | { "request_irq", VCPU_STAT(request_irq_exits) }, | 155 | { "request_irq", VCPU_STAT(request_irq_exits) }, |
@@ -1707,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1707 | vcpu->pvclock_set_guest_stopped_request = false; | 1708 | vcpu->pvclock_set_guest_stopped_request = false; |
1708 | } | 1709 | } |
1709 | 1710 | ||
1710 | pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO; | ||
1711 | |||
1712 | /* If the host uses TSC clocksource, then it is stable */ | 1711 | /* If the host uses TSC clocksource, then it is stable */ |
1713 | if (use_master_clock) | 1712 | if (use_master_clock) |
1714 | pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; | 1713 | pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; |
@@ -2006,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2006 | &vcpu->requests); | 2005 | &vcpu->requests); |
2007 | 2006 | ||
2008 | ka->boot_vcpu_runs_old_kvmclock = tmp; | 2007 | ka->boot_vcpu_runs_old_kvmclock = tmp; |
2009 | |||
2010 | ka->kvmclock_offset = -get_kernel_ns(); | ||
2011 | } | 2008 | } |
2012 | 2009 | ||
2013 | vcpu->arch.time = data; | 2010 | vcpu->arch.time = data; |
@@ -2189,6 +2186,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2189 | case MSR_IA32_LASTINTFROMIP: | 2186 | case MSR_IA32_LASTINTFROMIP: |
2190 | case MSR_IA32_LASTINTTOIP: | 2187 | case MSR_IA32_LASTINTTOIP: |
2191 | case MSR_K8_SYSCFG: | 2188 | case MSR_K8_SYSCFG: |
2189 | case MSR_K8_TSEG_ADDR: | ||
2190 | case MSR_K8_TSEG_MASK: | ||
2192 | case MSR_K7_HWCR: | 2191 | case MSR_K7_HWCR: |
2193 | case MSR_VM_HSAVE_PA: | 2192 | case MSR_VM_HSAVE_PA: |
2194 | case MSR_K8_INT_PENDING_MSG: | 2193 | case MSR_K8_INT_PENDING_MSG: |
@@ -6454,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) | |||
6454 | return 1; | 6453 | return 1; |
6455 | } | 6454 | } |
6456 | 6455 | ||
6456 | static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) | ||
6457 | { | ||
6458 | return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && | ||
6459 | !vcpu->arch.apf.halted); | ||
6460 | } | ||
6461 | |||
6457 | static int vcpu_run(struct kvm_vcpu *vcpu) | 6462 | static int vcpu_run(struct kvm_vcpu *vcpu) |
6458 | { | 6463 | { |
6459 | int r; | 6464 | int r; |
@@ -6462,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu) | |||
6462 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); | 6467 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); |
6463 | 6468 | ||
6464 | for (;;) { | 6469 | for (;;) { |
6465 | if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && | 6470 | if (kvm_vcpu_running(vcpu)) |
6466 | !vcpu->arch.apf.halted) | ||
6467 | r = vcpu_enter_guest(vcpu); | 6471 | r = vcpu_enter_guest(vcpu); |
6468 | else | 6472 | else |
6469 | r = vcpu_block(kvm, vcpu); | 6473 | r = vcpu_block(kvm, vcpu); |
@@ -7475,34 +7479,66 @@ void kvm_arch_sync_events(struct kvm *kvm) | |||
7475 | kvm_free_pit(kvm); | 7479 | kvm_free_pit(kvm); |
7476 | } | 7480 | } |
7477 | 7481 | ||
7478 | int __x86_set_memory_region(struct kvm *kvm, | 7482 | int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) |
7479 | const struct kvm_userspace_memory_region *mem) | ||
7480 | { | 7483 | { |
7481 | int i, r; | 7484 | int i, r; |
7485 | unsigned long hva; | ||
7486 | struct kvm_memslots *slots = kvm_memslots(kvm); | ||
7487 | struct kvm_memory_slot *slot, old; | ||
7482 | 7488 | ||
7483 | /* Called with kvm->slots_lock held. */ | 7489 | /* Called with kvm->slots_lock held. */ |
7484 | BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM); | 7490 | if (WARN_ON(id >= KVM_MEM_SLOTS_NUM)) |
7491 | return -EINVAL; | ||
7485 | 7492 | ||
7493 | slot = id_to_memslot(slots, id); | ||
7494 | if (size) { | ||
7495 | if (WARN_ON(slot->npages)) | ||
7496 | return -EEXIST; | ||
7497 | |||
7498 | /* | ||
7499 | * MAP_SHARED to prevent internal slot pages from being moved | ||
7500 | * by fork()/COW. | ||
7501 | */ | ||
7502 | hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, | ||
7503 | MAP_SHARED | MAP_ANONYMOUS, 0); | ||
7504 | if (IS_ERR((void *)hva)) | ||
7505 | return PTR_ERR((void *)hva); | ||
7506 | } else { | ||
7507 | if (!slot->npages) | ||
7508 | return 0; | ||
7509 | |||
7510 | hva = 0; | ||
7511 | } | ||
7512 | |||
7513 | old = *slot; | ||
7486 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { | 7514 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
7487 | struct kvm_userspace_memory_region m = *mem; | 7515 | struct kvm_userspace_memory_region m; |
7488 | 7516 | ||
7489 | m.slot |= i << 16; | 7517 | m.slot = id | (i << 16); |
7518 | m.flags = 0; | ||
7519 | m.guest_phys_addr = gpa; | ||
7520 | m.userspace_addr = hva; | ||
7521 | m.memory_size = size; | ||
7490 | r = __kvm_set_memory_region(kvm, &m); | 7522 | r = __kvm_set_memory_region(kvm, &m); |
7491 | if (r < 0) | 7523 | if (r < 0) |
7492 | return r; | 7524 | return r; |
7493 | } | 7525 | } |
7494 | 7526 | ||
7527 | if (!size) { | ||
7528 | r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); | ||
7529 | WARN_ON(r < 0); | ||
7530 | } | ||
7531 | |||
7495 | return 0; | 7532 | return 0; |
7496 | } | 7533 | } |
7497 | EXPORT_SYMBOL_GPL(__x86_set_memory_region); | 7534 | EXPORT_SYMBOL_GPL(__x86_set_memory_region); |
7498 | 7535 | ||
7499 | int x86_set_memory_region(struct kvm *kvm, | 7536 | int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) |
7500 | const struct kvm_userspace_memory_region *mem) | ||
7501 | { | 7537 | { |
7502 | int r; | 7538 | int r; |
7503 | 7539 | ||
7504 | mutex_lock(&kvm->slots_lock); | 7540 | mutex_lock(&kvm->slots_lock); |
7505 | r = __x86_set_memory_region(kvm, mem); | 7541 | r = __x86_set_memory_region(kvm, id, gpa, size); |
7506 | mutex_unlock(&kvm->slots_lock); | 7542 | mutex_unlock(&kvm->slots_lock); |
7507 | 7543 | ||
7508 | return r; | 7544 | return r; |
@@ -7517,16 +7553,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
7517 | * unless the memory map has changed due to process exit | 7553 | * unless the memory map has changed due to process exit |
7518 | * or fd copying. | 7554 | * or fd copying. |
7519 | */ | 7555 | */ |
7520 | struct kvm_userspace_memory_region mem; | 7556 | x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0); |
7521 | memset(&mem, 0, sizeof(mem)); | 7557 | x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0); |
7522 | mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; | 7558 | x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); |
7523 | x86_set_memory_region(kvm, &mem); | ||
7524 | |||
7525 | mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; | ||
7526 | x86_set_memory_region(kvm, &mem); | ||
7527 | |||
7528 | mem.slot = TSS_PRIVATE_MEMSLOT; | ||
7529 | x86_set_memory_region(kvm, &mem); | ||
7530 | } | 7559 | } |
7531 | kvm_iommu_unmap_guest(kvm); | 7560 | kvm_iommu_unmap_guest(kvm); |
7532 | kfree(kvm->arch.vpic); | 7561 | kfree(kvm->arch.vpic); |
@@ -7629,27 +7658,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, | |||
7629 | const struct kvm_userspace_memory_region *mem, | 7658 | const struct kvm_userspace_memory_region *mem, |
7630 | enum kvm_mr_change change) | 7659 | enum kvm_mr_change change) |
7631 | { | 7660 | { |
7632 | /* | ||
7633 | * Only private memory slots need to be mapped here since | ||
7634 | * KVM_SET_MEMORY_REGION ioctl is no longer supported. | ||
7635 | */ | ||
7636 | if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { | ||
7637 | unsigned long userspace_addr; | ||
7638 | |||
7639 | /* | ||
7640 | * MAP_SHARED to prevent internal slot pages from being moved | ||
7641 | * by fork()/COW. | ||
7642 | */ | ||
7643 | userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, | ||
7644 | PROT_READ | PROT_WRITE, | ||
7645 | MAP_SHARED | MAP_ANONYMOUS, 0); | ||
7646 | |||
7647 | if (IS_ERR((void *)userspace_addr)) | ||
7648 | return PTR_ERR((void *)userspace_addr); | ||
7649 | |||
7650 | memslot->userspace_addr = userspace_addr; | ||
7651 | } | ||
7652 | |||
7653 | return 0; | 7661 | return 0; |
7654 | } | 7662 | } |
7655 | 7663 | ||
@@ -7711,17 +7719,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
7711 | { | 7719 | { |
7712 | int nr_mmu_pages = 0; | 7720 | int nr_mmu_pages = 0; |
7713 | 7721 | ||
7714 | if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) { | ||
7715 | int ret; | ||
7716 | |||
7717 | ret = vm_munmap(old->userspace_addr, | ||
7718 | old->npages * PAGE_SIZE); | ||
7719 | if (ret < 0) | ||
7720 | printk(KERN_WARNING | ||
7721 | "kvm_vm_ioctl_set_memory_region: " | ||
7722 | "failed to munmap memory\n"); | ||
7723 | } | ||
7724 | |||
7725 | if (!kvm->arch.n_requested_mmu_pages) | 7722 | if (!kvm->arch.n_requested_mmu_pages) |
7726 | nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); | 7723 | nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); |
7727 | 7724 | ||
@@ -7770,19 +7767,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | |||
7770 | kvm_mmu_invalidate_zap_all_pages(kvm); | 7767 | kvm_mmu_invalidate_zap_all_pages(kvm); |
7771 | } | 7768 | } |
7772 | 7769 | ||
7770 | static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) | ||
7771 | { | ||
7772 | if (!list_empty_careful(&vcpu->async_pf.done)) | ||
7773 | return true; | ||
7774 | |||
7775 | if (kvm_apic_has_events(vcpu)) | ||
7776 | return true; | ||
7777 | |||
7778 | if (vcpu->arch.pv.pv_unhalted) | ||
7779 | return true; | ||
7780 | |||
7781 | if (atomic_read(&vcpu->arch.nmi_queued)) | ||
7782 | return true; | ||
7783 | |||
7784 | if (test_bit(KVM_REQ_SMI, &vcpu->requests)) | ||
7785 | return true; | ||
7786 | |||
7787 | if (kvm_arch_interrupt_allowed(vcpu) && | ||
7788 | kvm_cpu_has_interrupt(vcpu)) | ||
7789 | return true; | ||
7790 | |||
7791 | return false; | ||
7792 | } | ||
7793 | |||
7773 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | 7794 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
7774 | { | 7795 | { |
7775 | if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) | 7796 | if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) |
7776 | kvm_x86_ops->check_nested_events(vcpu, false); | 7797 | kvm_x86_ops->check_nested_events(vcpu, false); |
7777 | 7798 | ||
7778 | return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && | 7799 | return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); |
7779 | !vcpu->arch.apf.halted) | ||
7780 | || !list_empty_careful(&vcpu->async_pf.done) | ||
7781 | || kvm_apic_has_events(vcpu) | ||
7782 | || vcpu->arch.pv.pv_unhalted | ||
7783 | || atomic_read(&vcpu->arch.nmi_queued) || | ||
7784 | (kvm_arch_interrupt_allowed(vcpu) && | ||
7785 | kvm_cpu_has_interrupt(vcpu)); | ||
7786 | } | 7800 | } |
7787 | 7801 | ||
7788 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) | 7802 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) |
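
The rewritten __x86_set_memory_region() now allocates the backing HVA itself with an anonymous MAP_SHARED mapping so that fork()/COW cannot move the internal slot pages, as its comment notes. The userspace program below demonstrates the same mapping semantics with plain mmap(); it is an illustration of the property, not the kernel path.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            size_t size = 4096;
            char *hva = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_SHARED | MAP_ANONYMOUS, -1, 0);
            if (hva == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            if (fork() == 0) {              /* child writes through the mapping */
                    strcpy(hva, "written by child");
                    _exit(0);
            }
            wait(NULL);
            printf("parent sees: %s\n", hva);  /* shared: no COW copy was made */
            munmap(hva, size);
            return 0;
    }

Passing size 0 to the new helper tears a slot down again, which is how kvm_arch_destroy_vm() above now frees the three private memslots with single calls.
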
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 161804de124a..a0d09f6c6533 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -1015,7 +1015,7 @@ static struct clock_event_device lguest_clockevent = { | |||
1015 | * This is the Guest timer interrupt handler (hardware interrupt 0). We just | 1015 | * This is the Guest timer interrupt handler (hardware interrupt 0). We just |
1016 | * call the clockevent infrastructure and it does whatever needs doing. | 1016 | * call the clockevent infrastructure and it does whatever needs doing. |
1017 | */ | 1017 | */ |
1018 | static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) | 1018 | static void lguest_time_irq(struct irq_desc *desc) |
1019 | { | 1019 | { |
1020 | unsigned long flags; | 1020 | unsigned long flags; |
1021 | 1021 | ||
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 30564e2752d3..df48430c279b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void) | |||
1132 | * has been zapped already via cleanup_highmem(). | 1132 | * has been zapped already via cleanup_highmem(). |
1133 | */ | 1133 | */ |
1134 | all_end = roundup((unsigned long)_brk_end, PMD_SIZE); | 1134 | all_end = roundup((unsigned long)_brk_end, PMD_SIZE); |
1135 | set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); | 1135 | set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); |
1136 | 1136 | ||
1137 | rodata_test(); | 1137 | rodata_test(); |
1138 | 1138 | ||
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 66338a60aa6e..c2aea63bee20 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c | |||
@@ -192,10 +192,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | |||
192 | 192 | ||
193 | node_set(node, numa_nodes_parsed); | 193 | node_set(node, numa_nodes_parsed); |
194 | 194 | ||
195 | pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s\n", | 195 | pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n", |
196 | node, pxm, | 196 | node, pxm, |
197 | (unsigned long long) start, (unsigned long long) end - 1, | 197 | (unsigned long long) start, (unsigned long long) end - 1, |
198 | hotpluggable ? " hotplug" : ""); | 198 | hotpluggable ? " hotplug" : "", |
199 | ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : ""); | ||
199 | 200 | ||
200 | /* Mark hotplug range in memblock. */ | 201 | /* Mark hotplug range in memblock. */ |
201 | if (hotpluggable && memblock_mark_hotplug(start, ma->length)) | 202 | if (hotpluggable && memblock_mark_hotplug(start, ma->length)) |
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 09d3afc0a181..dc78a4a9a466 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -166,6 +166,7 @@ void pcibios_fixup_bus(struct pci_bus *b) | |||
166 | { | 166 | { |
167 | struct pci_dev *dev; | 167 | struct pci_dev *dev; |
168 | 168 | ||
169 | pci_read_bridge_bases(b); | ||
169 | list_for_each_entry(dev, &b->devices, bus_list) | 170 | list_for_each_entry(dev, &b->devices, bus_list) |
170 | pcibios_fixup_device_resources(dev); | 171 | pcibios_fixup_device_resources(dev); |
171 | } | 172 | } |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 1db84c0758b7..6a28ded74211 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -705,6 +705,70 @@ out: | |||
705 | } | 705 | } |
706 | 706 | ||
707 | /* | 707 | /* |
708 | * Iterate the EFI memory map in reverse order because the regions | ||
709 | * will be mapped top-down. The end result is the same as if we had | ||
710 | * mapped things forward, but doesn't require us to change the | ||
711 | * existing implementation of efi_map_region(). | ||
712 | */ | ||
713 | static inline void *efi_map_next_entry_reverse(void *entry) | ||
714 | { | ||
715 | /* Initial call */ | ||
716 | if (!entry) | ||
717 | return memmap.map_end - memmap.desc_size; | ||
718 | |||
719 | entry -= memmap.desc_size; | ||
720 | if (entry < memmap.map) | ||
721 | return NULL; | ||
722 | |||
723 | return entry; | ||
724 | } | ||
725 | |||
726 | /* | ||
727 | * efi_map_next_entry - Return the next EFI memory map descriptor | ||
728 | * @entry: Previous EFI memory map descriptor | ||
729 | * | ||
730 | * This is a helper function to iterate over the EFI memory map, which | ||
731 | * we do in different orders depending on the current configuration. | ||
732 | * | ||
733 | * To begin traversing the memory map @entry must be %NULL. | ||
734 | * | ||
735 | * Returns %NULL when we reach the end of the memory map. | ||
736 | */ | ||
737 | static void *efi_map_next_entry(void *entry) | ||
738 | { | ||
739 | if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) { | ||
740 | /* | ||
741 | * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE | ||
742 | * config table feature requires us to map all entries | ||
743 | * in the same order as they appear in the EFI memory | ||
744 | * map. That is to say, entry N must have a lower | ||
745 | * virtual address than entry N+1. This is because the | ||
746 | * firmware toolchain leaves relative references in | ||
747 | * the code/data sections, which are split and become | ||
748 | * separate EFI memory regions. Mapping things | ||
749 | * out-of-order leads to the firmware accessing | ||
750 | * unmapped addresses. | ||
751 | * | ||
752 | * Since we need to map things this way whether or not | ||
753 | * the kernel actually makes use of | ||
754 | * EFI_PROPERTIES_TABLE, let's just switch to this | ||
755 | * scheme by default for 64-bit. | ||
756 | */ | ||
757 | return efi_map_next_entry_reverse(entry); | ||
758 | } | ||
759 | |||
760 | /* Initial call */ | ||
761 | if (!entry) | ||
762 | return memmap.map; | ||
763 | |||
764 | entry += memmap.desc_size; | ||
765 | if (entry >= memmap.map_end) | ||
766 | return NULL; | ||
767 | |||
768 | return entry; | ||
769 | } | ||
770 | |||
771 | /* | ||
708 | * Map the efi memory ranges of the runtime services and update new_mmap with | 772 | * Map the efi memory ranges of the runtime services and update new_mmap with |
709 | * virtual addresses. | 773 | * virtual addresses. |
710 | */ | 774 | */ |
@@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift) | |||
714 | unsigned long left = 0; | 778 | unsigned long left = 0; |
715 | efi_memory_desc_t *md; | 779 | efi_memory_desc_t *md; |
716 | 780 | ||
717 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 781 | p = NULL; |
782 | while ((p = efi_map_next_entry(p))) { | ||
718 | md = p; | 783 | md = p; |
719 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) { | 784 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) { |
720 | #ifdef CONFIG_X86_64 | 785 | #ifdef CONFIG_X86_64 |
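
efi_map_next_entry() above is essentially a cursor over a packed array of fixed-stride descriptors, walked forward or, for 64-bit non-old-memmap setups, in reverse. The toy program below reproduces that traversal pattern only; the two-byte "descriptors" and their contents are invented for illustration and have nothing to do with real EFI memory descriptors.

    #include <stdio.h>

    static char map[] = "AABBCCDD";            /* four 2-byte "descriptors" */
    static const size_t desc_size = 2;
    static char *map_end = map + sizeof(map) - 1;

    static void *next_entry_reverse(void *entry)
    {
            if (!entry)                        /* initial call: last descriptor */
                    return map_end - desc_size;
            entry = (char *)entry - desc_size;
            return (char *)entry < map ? NULL : entry;
    }

    static void *next_entry(void *entry, int reverse)
    {
            if (reverse)
                    return next_entry_reverse(entry);
            if (!entry)                        /* initial call: first descriptor */
                    return map;
            entry = (char *)entry + desc_size;
            return (char *)entry >= map_end ? NULL : entry;
    }

    int main(void)
    {
            for (void *p = NULL; (p = next_entry(p, 1)); )
                    printf("%.2s ", (char *)p);    /* prints DD CC BB AA */
            printf("\n");
            return 0;
    }

Walking backwards while efi_map_region() allocates top-down leaves entry N at a lower virtual address than entry N+1, which is the ordering the EFI_PROPERTIES_TABLE comment above requires.
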
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 30d12afe52ed..993b7a71386d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -33,6 +33,10 @@ | |||
33 | #include <linux/memblock.h> | 33 | #include <linux/memblock.h> |
34 | #include <linux/edd.h> | 34 | #include <linux/edd.h> |
35 | 35 | ||
36 | #ifdef CONFIG_KEXEC_CORE | ||
37 | #include <linux/kexec.h> | ||
38 | #endif | ||
39 | |||
36 | #include <xen/xen.h> | 40 | #include <xen/xen.h> |
37 | #include <xen/events.h> | 41 | #include <xen/events.h> |
38 | #include <xen/interface/xen.h> | 42 | #include <xen/interface/xen.h> |
@@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) | |||
1077 | /* Fast syscall setup is all done in hypercalls, so | 1081 | /* Fast syscall setup is all done in hypercalls, so |
1078 | these are all ignored. Stub them out here to stop | 1082 | these are all ignored. Stub them out here to stop |
1079 | Xen console noise. */ | 1083 | Xen console noise. */ |
1084 | break; | ||
1080 | 1085 | ||
1081 | default: | 1086 | default: |
1082 | if (!pmu_msr_write(msr, low, high, &ret)) | 1087 | if (!pmu_msr_write(msr, low, high, &ret)) |
@@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = { | |||
1807 | .notifier_call = xen_hvm_cpu_notify, | 1812 | .notifier_call = xen_hvm_cpu_notify, |
1808 | }; | 1813 | }; |
1809 | 1814 | ||
1815 | #ifdef CONFIG_KEXEC_CORE | ||
1816 | static void xen_hvm_shutdown(void) | ||
1817 | { | ||
1818 | native_machine_shutdown(); | ||
1819 | if (kexec_in_progress) | ||
1820 | xen_reboot(SHUTDOWN_soft_reset); | ||
1821 | } | ||
1822 | |||
1823 | static void xen_hvm_crash_shutdown(struct pt_regs *regs) | ||
1824 | { | ||
1825 | native_machine_crash_shutdown(regs); | ||
1826 | xen_reboot(SHUTDOWN_soft_reset); | ||
1827 | } | ||
1828 | #endif | ||
1829 | |||
1810 | static void __init xen_hvm_guest_init(void) | 1830 | static void __init xen_hvm_guest_init(void) |
1811 | { | 1831 | { |
1812 | if (xen_pv_domain()) | 1832 | if (xen_pv_domain()) |
@@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void) | |||
1826 | x86_init.irqs.intr_init = xen_init_IRQ; | 1846 | x86_init.irqs.intr_init = xen_init_IRQ; |
1827 | xen_hvm_init_time_ops(); | 1847 | xen_hvm_init_time_ops(); |
1828 | xen_hvm_init_mmu_ops(); | 1848 | xen_hvm_init_mmu_ops(); |
1849 | #ifdef CONFIG_KEXEC_CORE | ||
1850 | machine_ops.shutdown = xen_hvm_shutdown; | ||
1851 | machine_ops.crash_shutdown = xen_hvm_crash_shutdown; | ||
1852 | #endif | ||
1829 | } | 1853 | } |
1830 | #endif | 1854 | #endif |
1831 | 1855 | ||
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index bfc08b13044b..660b3cfef234 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -112,6 +112,15 @@ static unsigned long *p2m_identity; | |||
112 | static pte_t *p2m_missing_pte; | 112 | static pte_t *p2m_missing_pte; |
113 | static pte_t *p2m_identity_pte; | 113 | static pte_t *p2m_identity_pte; |
114 | 114 | ||
115 | /* | ||
116 | * Hint at last populated PFN. | ||
117 | * | ||
118 | * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack | ||
119 | * can avoid scanning the whole P2M (which may be sized to account for | ||
120 | * hotplugged memory). | ||
121 | */ | ||
122 | static unsigned long xen_p2m_last_pfn; | ||
123 | |||
115 | static inline unsigned p2m_top_index(unsigned long pfn) | 124 | static inline unsigned p2m_top_index(unsigned long pfn) |
116 | { | 125 | { |
117 | BUG_ON(pfn >= MAX_P2M_PFN); | 126 | BUG_ON(pfn >= MAX_P2M_PFN); |
@@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void) | |||
270 | else | 279 | else |
271 | HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = | 280 | HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = |
272 | virt_to_mfn(p2m_top_mfn); | 281 | virt_to_mfn(p2m_top_mfn); |
273 | HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; | 282 | HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; |
274 | HYPERVISOR_shared_info->arch.p2m_generation = 0; | 283 | HYPERVISOR_shared_info->arch.p2m_generation = 0; |
275 | HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr; | 284 | HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr; |
276 | HYPERVISOR_shared_info->arch.p2m_cr3 = | 285 | HYPERVISOR_shared_info->arch.p2m_cr3 = |
@@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void) | |||
406 | static struct vm_struct vm; | 415 | static struct vm_struct vm; |
407 | unsigned long p2m_limit; | 416 | unsigned long p2m_limit; |
408 | 417 | ||
418 | xen_p2m_last_pfn = xen_max_p2m_pfn; | ||
419 | |||
409 | p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; | 420 | p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; |
410 | vm.flags = VM_ALLOC; | 421 | vm.flags = VM_ALLOC; |
411 | vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), | 422 | vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), |
@@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn) | |||
608 | free_p2m_page(p2m); | 619 | free_p2m_page(p2m); |
609 | } | 620 | } |
610 | 621 | ||
622 | /* Expanded the p2m? */ | ||
623 | if (pfn > xen_p2m_last_pfn) { | ||
624 | xen_p2m_last_pfn = pfn; | ||
625 | HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; | ||
626 | } | ||
627 | |||
611 | return true; | 628 | return true; |
612 | } | 629 | } |
613 | 630 | ||
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index f5ef6746d47a..1c30e4ab1022 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void) | |||
548 | { | 548 | { |
549 | unsigned long max_pages, limit; | 549 | unsigned long max_pages, limit; |
550 | domid_t domid = DOMID_SELF; | 550 | domid_t domid = DOMID_SELF; |
551 | int ret; | 551 | long ret; |
552 | 552 | ||
553 | limit = xen_get_pages_limit(); | 553 | limit = xen_get_pages_limit(); |
554 | max_pages = limit; | 554 | max_pages = limit; |
@@ -798,7 +798,7 @@ char * __init xen_memory_setup(void) | |||
798 | xen_ignore_unusable(); | 798 | xen_ignore_unusable(); |
799 | 799 | ||
800 | /* Make sure the Xen-supplied memory map is well-ordered. */ | 800 | /* Make sure the Xen-supplied memory map is well-ordered. */ |
801 | sanitize_e820_map(xen_e820_map, xen_e820_map_entries, | 801 | sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map), |
802 | &xen_e820_map_entries); | 802 | &xen_e820_map_entries); |
803 | 803 | ||
804 | max_pages = xen_get_max_pages(); | 804 | max_pages = xen_get_max_pages(); |
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 63c223dff5f1..b56855a1382a 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild | |||
@@ -28,4 +28,5 @@ generic-y += statfs.h | |||
28 | generic-y += termios.h | 28 | generic-y += termios.h |
29 | generic-y += topology.h | 29 | generic-y += topology.h |
30 | generic-y += trace_clock.h | 30 | generic-y += trace_clock.h |
31 | generic-y += word-at-a-time.h | ||
31 | generic-y += xor.h | 32 | generic-y += xor.h |
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index d27b4dcf221f..b848cc3dc913 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c | |||
@@ -210,6 +210,10 @@ subsys_initcall(pcibios_init); | |||
210 | 210 | ||
211 | void pcibios_fixup_bus(struct pci_bus *bus) | 211 | void pcibios_fixup_bus(struct pci_bus *bus) |
212 | { | 212 | { |
213 | if (bus->parent) { | ||
214 | /* This is a subordinate bridge */ | ||
215 | pci_read_bridge_bases(bus); | ||
216 | } | ||
213 | } | 217 | } |
214 | 218 | ||
215 | void pcibios_set_master(struct pci_dev *dev) | 219 | void pcibios_set_master(struct pci_dev *dev) |
diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 4aecca79374a..14b8faf8b09d 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c | |||
@@ -140,6 +140,11 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, | |||
140 | 140 | ||
141 | iv = bip->bip_vec + bip->bip_vcnt; | 141 | iv = bip->bip_vec + bip->bip_vcnt; |
142 | 142 | ||
143 | if (bip->bip_vcnt && | ||
144 | bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev), | ||
145 | &bip->bip_vec[bip->bip_vcnt - 1], offset)) | ||
146 | return 0; | ||
147 | |||
143 | iv->bv_page = page; | 148 | iv->bv_page = page; |
144 | iv->bv_len = len; | 149 | iv->bv_len = len; |
145 | iv->bv_offset = offset; | 150 | iv->bv_offset = offset; |
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index ac8370cb2515..55512dd62633 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -370,6 +370,9 @@ static void blkg_destroy_all(struct request_queue *q) | |||
370 | blkg_destroy(blkg); | 370 | blkg_destroy(blkg); |
371 | spin_unlock(&blkcg->lock); | 371 | spin_unlock(&blkcg->lock); |
372 | } | 372 | } |
373 | |||
374 | q->root_blkg = NULL; | ||
375 | q->root_rl.blkg = NULL; | ||
373 | } | 376 | } |
374 | 377 | ||
375 | /* | 378 | /* |
diff --git a/block/blk-integrity.c b/block/blk-integrity.c index f548b64be092..75f29cf70188 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c | |||
@@ -204,6 +204,9 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req, | |||
204 | q->limits.max_integrity_segments) | 204 | q->limits.max_integrity_segments) |
205 | return false; | 205 | return false; |
206 | 206 | ||
207 | if (integrity_req_gap_back_merge(req, next->bio)) | ||
208 | return false; | ||
209 | |||
207 | return true; | 210 | return true; |
208 | } | 211 | } |
209 | EXPORT_SYMBOL(blk_integrity_merge_rq); | 212 | EXPORT_SYMBOL(blk_integrity_merge_rq); |
diff --git a/block/blk-map.c b/block/blk-map.c index 233841644c9d..f565e11f465a 100644 --- a/block/blk-map.c +++ b/block/blk-map.c | |||
@@ -9,6 +9,24 @@ | |||
9 | 9 | ||
10 | #include "blk.h" | 10 | #include "blk.h" |
11 | 11 | ||
12 | static bool iovec_gap_to_prv(struct request_queue *q, | ||
13 | struct iovec *prv, struct iovec *cur) | ||
14 | { | ||
15 | unsigned long prev_end; | ||
16 | |||
17 | if (!queue_virt_boundary(q)) | ||
18 | return false; | ||
19 | |||
20 | if (prv->iov_base == NULL && prv->iov_len == 0) | ||
21 | /* prv is not set - don't check */ | ||
22 | return false; | ||
23 | |||
24 | prev_end = (unsigned long)(prv->iov_base + prv->iov_len); | ||
25 | |||
26 | return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) || | ||
27 | prev_end & queue_virt_boundary(q)); | ||
28 | } | ||
29 | |||
12 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, | 30 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, |
13 | struct bio *bio) | 31 | struct bio *bio) |
14 | { | 32 | { |
@@ -67,7 +85,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | |||
67 | struct bio *bio; | 85 | struct bio *bio; |
68 | int unaligned = 0; | 86 | int unaligned = 0; |
69 | struct iov_iter i; | 87 | struct iov_iter i; |
70 | struct iovec iov; | 88 | struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0}; |
71 | 89 | ||
72 | if (!iter || !iter->count) | 90 | if (!iter || !iter->count) |
73 | return -EINVAL; | 91 | return -EINVAL; |
@@ -81,8 +99,12 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | |||
81 | /* | 99 | /* |
82 | * Keep going so we check length of all segments | 100 | * Keep going so we check length of all segments |
83 | */ | 101 | */ |
84 | if (uaddr & queue_dma_alignment(q)) | 102 | if ((uaddr & queue_dma_alignment(q)) || |
103 | iovec_gap_to_prv(q, &prv, &iov)) | ||
85 | unaligned = 1; | 104 | unaligned = 1; |
105 | |||
106 | prv.iov_base = iov.iov_base; | ||
107 | prv.iov_len = iov.iov_len; | ||
86 | } | 108 | } |
87 | 109 | ||
88 | if (unaligned || (q->dma_pad_mask & iter->count) || map_data) | 110 | if (unaligned || (q->dma_pad_mask & iter->count) || map_data) |
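
The new iovec_gap_to_prv() enforces the queue's virt boundary: two consecutive segments may only stay in one request if the previous one ends on the boundary and the next one starts on it. A self-contained sketch of that predicate follows; the 4K mask is an assumed example, the real value comes from queue_virt_boundary().

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VIRT_BOUNDARY_MASK 0xfffUL            /* assumed, e.g. 4K - 1 */

    static bool gap_to_prev(uintptr_t prv_base, size_t prv_len, uintptr_t cur_base)
    {
            uintptr_t prev_end = prv_base + prv_len;

            /* A gap exists if the previous segment does not end on the
             * boundary or the next one does not start on it. */
            return (cur_base & VIRT_BOUNDARY_MASK) ||
                   (prev_end & VIRT_BOUNDARY_MASK);
    }

    int main(void)
    {
            /* Ends/starts on the boundary: mergeable, no gap. */
            printf("%d\n", gap_to_prev(0x10000, 0x1000, 0x20000));   /* 0 */
            /* Previous segment ends mid-page: a gap, must not merge. */
            printf("%d\n", gap_to_prev(0x10000, 0x0800, 0x20000));   /* 1 */
            return 0;
    }
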
diff --git a/block/blk-merge.c b/block/blk-merge.c index d088cffb8105..c4e9c37f3e38 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -66,36 +66,33 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
66 | struct bio *bio, | 66 | struct bio *bio, |
67 | struct bio_set *bs) | 67 | struct bio_set *bs) |
68 | { | 68 | { |
69 | struct bio *split; | 69 | struct bio_vec bv, bvprv, *bvprvp = NULL; |
70 | struct bio_vec bv, bvprv; | ||
71 | struct bvec_iter iter; | 70 | struct bvec_iter iter; |
72 | unsigned seg_size = 0, nsegs = 0, sectors = 0; | 71 | unsigned seg_size = 0, nsegs = 0, sectors = 0; |
73 | int prev = 0; | ||
74 | 72 | ||
75 | bio_for_each_segment(bv, bio, iter) { | 73 | bio_for_each_segment(bv, bio, iter) { |
76 | sectors += bv.bv_len >> 9; | 74 | if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) |
77 | |||
78 | if (sectors > queue_max_sectors(q)) | ||
79 | goto split; | 75 | goto split; |
80 | 76 | ||
81 | /* | 77 | /* |
82 | * If the queue doesn't support SG gaps and adding this | 78 | * If the queue doesn't support SG gaps and adding this |
83 | * offset would create a gap, disallow it. | 79 | * offset would create a gap, disallow it. |
84 | */ | 80 | */ |
85 | if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset)) | 81 | if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset)) |
86 | goto split; | 82 | goto split; |
87 | 83 | ||
88 | if (prev && blk_queue_cluster(q)) { | 84 | if (bvprvp && blk_queue_cluster(q)) { |
89 | if (seg_size + bv.bv_len > queue_max_segment_size(q)) | 85 | if (seg_size + bv.bv_len > queue_max_segment_size(q)) |
90 | goto new_segment; | 86 | goto new_segment; |
91 | if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv)) | 87 | if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv)) |
92 | goto new_segment; | 88 | goto new_segment; |
93 | if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) | 89 | if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv)) |
94 | goto new_segment; | 90 | goto new_segment; |
95 | 91 | ||
96 | seg_size += bv.bv_len; | 92 | seg_size += bv.bv_len; |
97 | bvprv = bv; | 93 | bvprv = bv; |
98 | prev = 1; | 94 | bvprvp = &bv; |
95 | sectors += bv.bv_len >> 9; | ||
99 | continue; | 96 | continue; |
100 | } | 97 | } |
101 | new_segment: | 98 | new_segment: |
@@ -104,23 +101,14 @@ new_segment: | |||
104 | 101 | ||
105 | nsegs++; | 102 | nsegs++; |
106 | bvprv = bv; | 103 | bvprv = bv; |
107 | prev = 1; | 104 | bvprvp = &bv; |
108 | seg_size = bv.bv_len; | 105 | seg_size = bv.bv_len; |
106 | sectors += bv.bv_len >> 9; | ||
109 | } | 107 | } |
110 | 108 | ||
111 | return NULL; | 109 | return NULL; |
112 | split: | 110 | split: |
113 | split = bio_clone_bioset(bio, GFP_NOIO, bs); | 111 | return bio_split(bio, sectors, GFP_NOIO, bs); |
114 | |||
115 | split->bi_iter.bi_size -= iter.bi_size; | ||
116 | bio->bi_iter = iter; | ||
117 | |||
118 | if (bio_integrity(bio)) { | ||
119 | bio_integrity_advance(bio, split->bi_iter.bi_size); | ||
120 | bio_integrity_trim(split, 0, bio_sectors(split)); | ||
121 | } | ||
122 | |||
123 | return split; | ||
124 | } | 112 | } |
125 | 113 | ||
126 | void blk_queue_split(struct request_queue *q, struct bio **bio, | 114 | void blk_queue_split(struct request_queue *q, struct bio **bio, |
@@ -439,6 +427,11 @@ no_merge: | |||
439 | int ll_back_merge_fn(struct request_queue *q, struct request *req, | 427 | int ll_back_merge_fn(struct request_queue *q, struct request *req, |
440 | struct bio *bio) | 428 | struct bio *bio) |
441 | { | 429 | { |
430 | if (req_gap_back_merge(req, bio)) | ||
431 | return 0; | ||
432 | if (blk_integrity_rq(req) && | ||
433 | integrity_req_gap_back_merge(req, bio)) | ||
434 | return 0; | ||
442 | if (blk_rq_sectors(req) + bio_sectors(bio) > | 435 | if (blk_rq_sectors(req) + bio_sectors(bio) > |
443 | blk_rq_get_max_sectors(req)) { | 436 | blk_rq_get_max_sectors(req)) { |
444 | req->cmd_flags |= REQ_NOMERGE; | 437 | req->cmd_flags |= REQ_NOMERGE; |
@@ -457,6 +450,12 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, | |||
457 | int ll_front_merge_fn(struct request_queue *q, struct request *req, | 450 | int ll_front_merge_fn(struct request_queue *q, struct request *req, |
458 | struct bio *bio) | 451 | struct bio *bio) |
459 | { | 452 | { |
453 | |||
454 | if (req_gap_front_merge(req, bio)) | ||
455 | return 0; | ||
456 | if (blk_integrity_rq(req) && | ||
457 | integrity_req_gap_front_merge(req, bio)) | ||
458 | return 0; | ||
460 | if (blk_rq_sectors(req) + bio_sectors(bio) > | 459 | if (blk_rq_sectors(req) + bio_sectors(bio) > |
461 | blk_rq_get_max_sectors(req)) { | 460 | blk_rq_get_max_sectors(req)) { |
462 | req->cmd_flags |= REQ_NOMERGE; | 461 | req->cmd_flags |= REQ_NOMERGE; |
@@ -483,14 +482,6 @@ static bool req_no_special_merge(struct request *req) | |||
483 | return !q->mq_ops && req->special; | 482 | return !q->mq_ops && req->special; |
484 | } | 483 | } |
485 | 484 | ||
486 | static int req_gap_to_prev(struct request *req, struct bio *next) | ||
487 | { | ||
488 | struct bio *prev = req->biotail; | ||
489 | |||
490 | return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1], | ||
491 | next->bi_io_vec[0].bv_offset); | ||
492 | } | ||
493 | |||
494 | static int ll_merge_requests_fn(struct request_queue *q, struct request *req, | 485 | static int ll_merge_requests_fn(struct request_queue *q, struct request *req, |
495 | struct request *next) | 486 | struct request *next) |
496 | { | 487 | { |
@@ -505,7 +496,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, | |||
505 | if (req_no_special_merge(req) || req_no_special_merge(next)) | 496 | if (req_no_special_merge(req) || req_no_special_merge(next)) |
506 | return 0; | 497 | return 0; |
507 | 498 | ||
508 | if (req_gap_to_prev(req, next->bio)) | 499 | if (req_gap_back_merge(req, next->bio)) |
509 | return 0; | 500 | return 0; |
510 | 501 | ||
511 | /* | 502 | /* |
@@ -713,10 +704,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) | |||
713 | !blk_write_same_mergeable(rq->bio, bio)) | 704 | !blk_write_same_mergeable(rq->bio, bio)) |
714 | return false; | 705 | return false; |
715 | 706 | ||
716 | /* Only check gaps if the bio carries data */ | ||
717 | if (bio_has_data(bio) && req_gap_to_prev(rq, bio)) | ||
718 | return false; | ||
719 | |||
720 | return true; | 707 | return true; |
721 | } | 708 | } |
722 | 709 | ||
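
blk_bio_segment_split() now checks a segment against max_sectors before accounting it, so the front half of a split can never exceed the queue limit, and bio_split() does the actual cloning. The loop below sketches only that accounting decision with arbitrary segment sizes; it is not the bvec walk itself.

    #include <stdio.h>

    int main(void)
    {
            unsigned seg_bytes[] = { 4096, 8192, 65536, 4096 };  /* arbitrary   */
            unsigned max_sectors = 32;                           /* queue limit */
            unsigned sectors = 0, nsegs = 0;

            for (unsigned i = 0; i < 4; i++) {
                    unsigned s = seg_bytes[i] >> 9;      /* bytes -> 512B sectors */

                    if (sectors + s > max_sectors) {     /* would exceed the limit */
                            printf("split after %u segments, %u sectors\n",
                                   nsegs, sectors);
                            return 0;
                    }
                    sectors += s;
                    nsegs++;
            }
            printf("no split needed (%u sectors)\n", sectors);
            return 0;
    }
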
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 1e28ddb656b8..8764c241e5bb 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c | |||
@@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu) | |||
31 | return cpu; | 31 | return cpu; |
32 | } | 32 | } |
33 | 33 | ||
34 | int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) | 34 | int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, |
35 | const struct cpumask *online_mask) | ||
35 | { | 36 | { |
36 | unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; | 37 | unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; |
37 | cpumask_var_t cpus; | 38 | cpumask_var_t cpus; |
@@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) | |||
41 | 42 | ||
42 | cpumask_clear(cpus); | 43 | cpumask_clear(cpus); |
43 | nr_cpus = nr_uniq_cpus = 0; | 44 | nr_cpus = nr_uniq_cpus = 0; |
44 | for_each_online_cpu(i) { | 45 | for_each_cpu(i, online_mask) { |
45 | nr_cpus++; | 46 | nr_cpus++; |
46 | first_sibling = get_first_sibling(i); | 47 | first_sibling = get_first_sibling(i); |
47 | if (!cpumask_test_cpu(first_sibling, cpus)) | 48 | if (!cpumask_test_cpu(first_sibling, cpus)) |
@@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) | |||
51 | 52 | ||
52 | queue = 0; | 53 | queue = 0; |
53 | for_each_possible_cpu(i) { | 54 | for_each_possible_cpu(i) { |
54 | if (!cpu_online(i)) { | 55 | if (!cpumask_test_cpu(i, online_mask)) { |
55 | map[i] = 0; | 56 | map[i] = 0; |
56 | continue; | 57 | continue; |
57 | } | 58 | } |
@@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) | |||
95 | if (!map) | 96 | if (!map) |
96 | return NULL; | 97 | return NULL; |
97 | 98 | ||
98 | if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) | 99 | if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) |
99 | return map; | 100 | return map; |
100 | 101 | ||
101 | kfree(map); | 102 | kfree(map); |
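
blk_mq_update_queue_map() now takes the online mask from its caller instead of sampling cpu_online() itself, which lets callers pass a stable snapshot. The sketch below spreads a toy mask over a few queues round-robin; the real mapping additionally folds sibling threads onto one queue, which is skipped here, and the mask value is an assumption.

    #include <stdio.h>

    int main(void)
    {
            unsigned nr_cpus = 8, nr_queues = 3;
            unsigned long online_mask = 0xb5;   /* assumed: CPUs 0,2,4,5,7 online */
            unsigned map[8], queue = 0;

            for (unsigned cpu = 0; cpu < nr_cpus; cpu++) {
                    if (!(online_mask & (1UL << cpu))) {
                            map[cpu] = 0;       /* offline CPUs fall back to queue 0 */
                            continue;
                    }
                    map[cpu] = queue++ % nr_queues;
            }
            for (unsigned cpu = 0; cpu < nr_cpus; cpu++)
                    printf("cpu %u -> queue %u\n", cpu, map[cpu]);
            return 0;
    }
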
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 279c5d674edf..788fffd9b409 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c | |||
@@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) | |||
229 | unsigned int i, first = 1; | 229 | unsigned int i, first = 1; |
230 | ssize_t ret = 0; | 230 | ssize_t ret = 0; |
231 | 231 | ||
232 | blk_mq_disable_hotplug(); | ||
233 | |||
234 | for_each_cpu(i, hctx->cpumask) { | 232 | for_each_cpu(i, hctx->cpumask) { |
235 | if (first) | 233 | if (first) |
236 | ret += sprintf(ret + page, "%u", i); | 234 | ret += sprintf(ret + page, "%u", i); |
@@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) | |||
240 | first = 0; | 238 | first = 0; |
241 | } | 239 | } |
242 | 240 | ||
243 | blk_mq_enable_hotplug(); | ||
244 | |||
245 | ret += sprintf(ret + page, "\n"); | 241 | ret += sprintf(ret + page, "\n"); |
246 | return ret; | 242 | return ret; |
247 | } | 243 | } |
@@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) | |||
343 | struct blk_mq_ctx *ctx; | 339 | struct blk_mq_ctx *ctx; |
344 | int i; | 340 | int i; |
345 | 341 | ||
346 | if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) | 342 | if (!hctx->nr_ctx) |
347 | return; | 343 | return; |
348 | 344 | ||
349 | hctx_for_each_ctx(hctx, ctx, i) | 345 | hctx_for_each_ctx(hctx, ctx, i) |
@@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) | |||
358 | struct blk_mq_ctx *ctx; | 354 | struct blk_mq_ctx *ctx; |
359 | int i, ret; | 355 | int i, ret; |
360 | 356 | ||
361 | if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) | 357 | if (!hctx->nr_ctx) |
362 | return 0; | 358 | return 0; |
363 | 359 | ||
364 | ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); | 360 | ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); |
@@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk) | |||
381 | struct blk_mq_ctx *ctx; | 377 | struct blk_mq_ctx *ctx; |
382 | int i, j; | 378 | int i, j; |
383 | 379 | ||
380 | blk_mq_disable_hotplug(); | ||
381 | |||
384 | queue_for_each_hw_ctx(q, hctx, i) { | 382 | queue_for_each_hw_ctx(q, hctx, i) { |
385 | blk_mq_unregister_hctx(hctx); | 383 | blk_mq_unregister_hctx(hctx); |
386 | 384 | ||
@@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk) | |||
395 | kobject_put(&q->mq_kobj); | 393 | kobject_put(&q->mq_kobj); |
396 | 394 | ||
397 | kobject_put(&disk_to_dev(disk)->kobj); | 395 | kobject_put(&disk_to_dev(disk)->kobj); |
396 | |||
397 | q->mq_sysfs_init_done = false; | ||
398 | blk_mq_enable_hotplug(); | ||
398 | } | 399 | } |
399 | 400 | ||
400 | static void blk_mq_sysfs_init(struct request_queue *q) | 401 | static void blk_mq_sysfs_init(struct request_queue *q) |
@@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk) | |||
425 | struct blk_mq_hw_ctx *hctx; | 426 | struct blk_mq_hw_ctx *hctx; |
426 | int ret, i; | 427 | int ret, i; |
427 | 428 | ||
429 | blk_mq_disable_hotplug(); | ||
430 | |||
428 | blk_mq_sysfs_init(q); | 431 | blk_mq_sysfs_init(q); |
429 | 432 | ||
430 | ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); | 433 | ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); |
431 | if (ret < 0) | 434 | if (ret < 0) |
432 | return ret; | 435 | goto out; |
433 | 436 | ||
434 | kobject_uevent(&q->mq_kobj, KOBJ_ADD); | 437 | kobject_uevent(&q->mq_kobj, KOBJ_ADD); |
435 | 438 | ||
436 | queue_for_each_hw_ctx(q, hctx, i) { | 439 | queue_for_each_hw_ctx(q, hctx, i) { |
437 | hctx->flags |= BLK_MQ_F_SYSFS_UP; | ||
438 | ret = blk_mq_register_hctx(hctx); | 440 | ret = blk_mq_register_hctx(hctx); |
439 | if (ret) | 441 | if (ret) |
440 | break; | 442 | break; |
441 | } | 443 | } |
442 | 444 | ||
443 | if (ret) { | 445 | if (ret) |
444 | blk_mq_unregister_disk(disk); | 446 | blk_mq_unregister_disk(disk); |
445 | return ret; | 447 | else |
446 | } | 448 | q->mq_sysfs_init_done = true; |
449 | out: | ||
450 | blk_mq_enable_hotplug(); | ||
447 | 451 | ||
448 | return 0; | 452 | return ret; |
449 | } | 453 | } |
450 | EXPORT_SYMBOL_GPL(blk_mq_register_disk); | 454 | EXPORT_SYMBOL_GPL(blk_mq_register_disk); |
451 | 455 | ||
@@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q) | |||
454 | struct blk_mq_hw_ctx *hctx; | 458 | struct blk_mq_hw_ctx *hctx; |
455 | int i; | 459 | int i; |
456 | 460 | ||
461 | if (!q->mq_sysfs_init_done) | ||
462 | return; | ||
463 | |||
457 | queue_for_each_hw_ctx(q, hctx, i) | 464 | queue_for_each_hw_ctx(q, hctx, i) |
458 | blk_mq_unregister_hctx(hctx); | 465 | blk_mq_unregister_hctx(hctx); |
459 | } | 466 | } |
@@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q) | |||
463 | struct blk_mq_hw_ctx *hctx; | 470 | struct blk_mq_hw_ctx *hctx; |
464 | int i, ret = 0; | 471 | int i, ret = 0; |
465 | 472 | ||
473 | if (!q->mq_sysfs_init_done) | ||
474 | return ret; | ||
475 | |||
466 | queue_for_each_hw_ctx(q, hctx, i) { | 476 | queue_for_each_hw_ctx(q, hctx, i) { |
467 | ret = blk_mq_register_hctx(hctx); | 477 | ret = blk_mq_register_hctx(hctx); |
468 | if (ret) | 478 | if (ret) |
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 9115c6d59948..ed96474d75cb 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c | |||
@@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, | |||
471 | } | 471 | } |
472 | EXPORT_SYMBOL(blk_mq_all_tag_busy_iter); | 472 | EXPORT_SYMBOL(blk_mq_all_tag_busy_iter); |
473 | 473 | ||
474 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, | 474 | void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, |
475 | void *priv) | 475 | void *priv) |
476 | { | 476 | { |
477 | struct blk_mq_tags *tags = hctx->tags; | 477 | struct blk_mq_hw_ctx *hctx; |
478 | int i; | ||
479 | |||
480 | |||
481 | queue_for_each_hw_ctx(q, hctx, i) { | ||
482 | struct blk_mq_tags *tags = hctx->tags; | ||
483 | |||
484 | /* | ||
485 | * If no software queues are currently mapped to this | ||
486 | * hardware queue, there's nothing to check | ||
487 | */ | ||
488 | if (!blk_mq_hw_queue_mapped(hctx)) | ||
489 | continue; | ||
490 | |||
491 | if (tags->nr_reserved_tags) | ||
492 | bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); | ||
493 | bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, | ||
494 | false); | ||
495 | } | ||
478 | 496 | ||
479 | if (tags->nr_reserved_tags) | ||
480 | bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); | ||
481 | bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, | ||
482 | false); | ||
483 | } | 497 | } |
484 | EXPORT_SYMBOL(blk_mq_tag_busy_iter); | ||
485 | 498 | ||
486 | static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) | 499 | static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) |
487 | { | 500 | { |
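The old blk_mq_tag_busy_iter() worked on a single hardware context; its replacement, blk_mq_queue_tag_busy_iter(), walks every mapped hardware queue of the request queue itself. A hedged sketch of a caller, using the busy_iter_fn signature that blk_mq_check_expired() also follows (the callback body is illustrative):

    static void count_busy(struct blk_mq_hw_ctx *hctx, struct request *rq,
                           void *priv, bool reserved)
    {
            (*(unsigned int *)priv)++;      /* e.g. count in-flight requests */
    }

    unsigned int busy = 0;
    blk_mq_queue_tag_busy_iter(q, count_busy, &busy);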
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 9eb2cf4f01cb..d468a79f2c4a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h | |||
@@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); | |||
58 | extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); | 58 | extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); |
59 | extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); | 59 | extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); |
60 | extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); | 60 | extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); |
61 | void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, | ||
62 | void *priv); | ||
61 | 63 | ||
62 | enum { | 64 | enum { |
63 | BLK_MQ_TAG_CACHE_MIN = 1, | 65 | BLK_MQ_TAG_CACHE_MIN = 1, |
diff --git a/block/blk-mq.c b/block/blk-mq.c index f2d67b4047a0..7785ae96267a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq) | |||
393 | * Ends all I/O on a request. It does not handle partial completions. | 393 | * Ends all I/O on a request. It does not handle partial completions. |
394 | * The actual completion happens out-of-order, through an IPI handler. | 394 | * The actual completion happens out-of-order, through an IPI handler. |
395 | **/ | 395 | **/ |
396 | void blk_mq_complete_request(struct request *rq) | 396 | void blk_mq_complete_request(struct request *rq, int error) |
397 | { | 397 | { |
398 | struct request_queue *q = rq->q; | 398 | struct request_queue *q = rq->q; |
399 | 399 | ||
400 | if (unlikely(blk_should_fake_timeout(q))) | 400 | if (unlikely(blk_should_fake_timeout(q))) |
401 | return; | 401 | return; |
402 | if (!blk_mark_rq_complete(rq)) | 402 | if (!blk_mark_rq_complete(rq)) { |
403 | rq->errors = error; | ||
403 | __blk_mq_complete_request(rq); | 404 | __blk_mq_complete_request(rq); |
405 | } | ||
404 | } | 406 | } |
405 | EXPORT_SYMBOL(blk_mq_complete_request); | 407 | EXPORT_SYMBOL(blk_mq_complete_request); |
406 | 408 | ||
@@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, | |||
616 | * If a request wasn't started before the queue was | 618 | * If a request wasn't started before the queue was |
617 | * marked dying, kill it here or it'll go unnoticed. | 619 | * marked dying, kill it here or it'll go unnoticed. |
618 | */ | 620 | */ |
619 | if (unlikely(blk_queue_dying(rq->q))) { | 621 | if (unlikely(blk_queue_dying(rq->q))) |
620 | rq->errors = -EIO; | 622 | blk_mq_complete_request(rq, -EIO); |
621 | blk_mq_complete_request(rq); | ||
622 | } | ||
623 | return; | 623 | return; |
624 | } | 624 | } |
625 | if (rq->cmd_flags & REQ_NO_TIMEOUT) | 625 | if (rq->cmd_flags & REQ_NO_TIMEOUT) |
@@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv) | |||
641 | .next = 0, | 641 | .next = 0, |
642 | .next_set = 0, | 642 | .next_set = 0, |
643 | }; | 643 | }; |
644 | struct blk_mq_hw_ctx *hctx; | ||
645 | int i; | 644 | int i; |
646 | 645 | ||
647 | queue_for_each_hw_ctx(q, hctx, i) { | 646 | blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); |
648 | /* | ||
649 | * If not software queues are currently mapped to this | ||
650 | * hardware queue, there's nothing to check | ||
651 | */ | ||
652 | if (!blk_mq_hw_queue_mapped(hctx)) | ||
653 | continue; | ||
654 | |||
655 | blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); | ||
656 | } | ||
657 | 647 | ||
658 | if (data.next_set) { | 648 | if (data.next_set) { |
659 | data.next = blk_rq_timeout(round_jiffies_up(data.next)); | 649 | data.next = blk_rq_timeout(round_jiffies_up(data.next)); |
660 | mod_timer(&q->timeout, data.next); | 650 | mod_timer(&q->timeout, data.next); |
661 | } else { | 651 | } else { |
652 | struct blk_mq_hw_ctx *hctx; | ||
653 | |||
662 | queue_for_each_hw_ctx(q, hctx, i) { | 654 | queue_for_each_hw_ctx(q, hctx, i) { |
663 | /* the hctx may be unmapped, so check it here */ | 655 | /* the hctx may be unmapped, so check it here */ |
664 | if (blk_mq_hw_queue_mapped(hctx)) | 656 | if (blk_mq_hw_queue_mapped(hctx)) |
@@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, | |||
1789 | } | 1781 | } |
1790 | } | 1782 | } |
1791 | 1783 | ||
1792 | static void blk_mq_map_swqueue(struct request_queue *q) | 1784 | static void blk_mq_map_swqueue(struct request_queue *q, |
1785 | const struct cpumask *online_mask) | ||
1793 | { | 1786 | { |
1794 | unsigned int i; | 1787 | unsigned int i; |
1795 | struct blk_mq_hw_ctx *hctx; | 1788 | struct blk_mq_hw_ctx *hctx; |
1796 | struct blk_mq_ctx *ctx; | 1789 | struct blk_mq_ctx *ctx; |
1797 | struct blk_mq_tag_set *set = q->tag_set; | 1790 | struct blk_mq_tag_set *set = q->tag_set; |
1798 | 1791 | ||
1792 | /* | ||
1793 | * Avoid others reading incomplete hctx->cpumask through sysfs | ||
1794 | */ | ||
1795 | mutex_lock(&q->sysfs_lock); | ||
1796 | |||
1799 | queue_for_each_hw_ctx(q, hctx, i) { | 1797 | queue_for_each_hw_ctx(q, hctx, i) { |
1800 | cpumask_clear(hctx->cpumask); | 1798 | cpumask_clear(hctx->cpumask); |
1801 | hctx->nr_ctx = 0; | 1799 | hctx->nr_ctx = 0; |
@@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q) | |||
1806 | */ | 1804 | */ |
1807 | queue_for_each_ctx(q, ctx, i) { | 1805 | queue_for_each_ctx(q, ctx, i) { |
1808 | /* If the cpu isn't online, the cpu is mapped to first hctx */ | 1806 | /* If the cpu isn't online, the cpu is mapped to first hctx */ |
1809 | if (!cpu_online(i)) | 1807 | if (!cpumask_test_cpu(i, online_mask)) |
1810 | continue; | 1808 | continue; |
1811 | 1809 | ||
1812 | hctx = q->mq_ops->map_queue(q, i); | 1810 | hctx = q->mq_ops->map_queue(q, i); |
1813 | cpumask_set_cpu(i, hctx->cpumask); | 1811 | cpumask_set_cpu(i, hctx->cpumask); |
1814 | cpumask_set_cpu(i, hctx->tags->cpumask); | ||
1815 | ctx->index_hw = hctx->nr_ctx; | 1812 | ctx->index_hw = hctx->nr_ctx; |
1816 | hctx->ctxs[hctx->nr_ctx++] = ctx; | 1813 | hctx->ctxs[hctx->nr_ctx++] = ctx; |
1817 | } | 1814 | } |
1818 | 1815 | ||
1816 | mutex_unlock(&q->sysfs_lock); | ||
1817 | |||
1819 | queue_for_each_hw_ctx(q, hctx, i) { | 1818 | queue_for_each_hw_ctx(q, hctx, i) { |
1820 | struct blk_mq_ctxmap *map = &hctx->ctx_map; | 1819 | struct blk_mq_ctxmap *map = &hctx->ctx_map; |
1821 | 1820 | ||
@@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q) | |||
1851 | hctx->next_cpu = cpumask_first(hctx->cpumask); | 1850 | hctx->next_cpu = cpumask_first(hctx->cpumask); |
1852 | hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; | 1851 | hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; |
1853 | } | 1852 | } |
1853 | |||
1854 | queue_for_each_ctx(q, ctx, i) { | ||
1855 | if (!cpumask_test_cpu(i, online_mask)) | ||
1856 | continue; | ||
1857 | |||
1858 | hctx = q->mq_ops->map_queue(q, i); | ||
1859 | cpumask_set_cpu(i, hctx->tags->cpumask); | ||
1860 | } | ||
1854 | } | 1861 | } |
1855 | 1862 | ||
1856 | static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) | 1863 | static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) |
@@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q) | |||
1918 | kfree(hctx); | 1925 | kfree(hctx); |
1919 | } | 1926 | } |
1920 | 1927 | ||
1928 | kfree(q->mq_map); | ||
1929 | q->mq_map = NULL; | ||
1930 | |||
1921 | kfree(q->queue_hw_ctx); | 1931 | kfree(q->queue_hw_ctx); |
1922 | 1932 | ||
1923 | /* ctx kobj stays in queue_ctx */ | 1933 | /* ctx kobj stays in queue_ctx */ |
@@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, | |||
2027 | if (blk_mq_init_hw_queues(q, set)) | 2037 | if (blk_mq_init_hw_queues(q, set)) |
2028 | goto err_hctxs; | 2038 | goto err_hctxs; |
2029 | 2039 | ||
2040 | get_online_cpus(); | ||
2030 | mutex_lock(&all_q_mutex); | 2041 | mutex_lock(&all_q_mutex); |
2031 | list_add_tail(&q->all_q_node, &all_q_list); | ||
2032 | mutex_unlock(&all_q_mutex); | ||
2033 | 2042 | ||
2043 | list_add_tail(&q->all_q_node, &all_q_list); | ||
2034 | blk_mq_add_queue_tag_set(set, q); | 2044 | blk_mq_add_queue_tag_set(set, q); |
2045 | blk_mq_map_swqueue(q, cpu_online_mask); | ||
2035 | 2046 | ||
2036 | blk_mq_map_swqueue(q); | 2047 | mutex_unlock(&all_q_mutex); |
2048 | put_online_cpus(); | ||
2037 | 2049 | ||
2038 | return q; | 2050 | return q; |
2039 | 2051 | ||
@@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q) | |||
2057 | { | 2069 | { |
2058 | struct blk_mq_tag_set *set = q->tag_set; | 2070 | struct blk_mq_tag_set *set = q->tag_set; |
2059 | 2071 | ||
2072 | mutex_lock(&all_q_mutex); | ||
2073 | list_del_init(&q->all_q_node); | ||
2074 | mutex_unlock(&all_q_mutex); | ||
2075 | |||
2060 | blk_mq_del_queue_tag_set(q); | 2076 | blk_mq_del_queue_tag_set(q); |
2061 | 2077 | ||
2062 | blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); | 2078 | blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); |
2063 | blk_mq_free_hw_queues(q, set); | 2079 | blk_mq_free_hw_queues(q, set); |
2064 | 2080 | ||
2065 | percpu_ref_exit(&q->mq_usage_counter); | 2081 | percpu_ref_exit(&q->mq_usage_counter); |
2066 | |||
2067 | kfree(q->mq_map); | ||
2068 | |||
2069 | q->mq_map = NULL; | ||
2070 | |||
2071 | mutex_lock(&all_q_mutex); | ||
2072 | list_del_init(&q->all_q_node); | ||
2073 | mutex_unlock(&all_q_mutex); | ||
2074 | } | 2082 | } |
2075 | 2083 | ||
2076 | /* Basically redo blk_mq_init_queue with queue frozen */ | 2084 | /* Basically redo blk_mq_init_queue with queue frozen */ |
2077 | static void blk_mq_queue_reinit(struct request_queue *q) | 2085 | static void blk_mq_queue_reinit(struct request_queue *q, |
2086 | const struct cpumask *online_mask) | ||
2078 | { | 2087 | { |
2079 | WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); | 2088 | WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); |
2080 | 2089 | ||
2081 | blk_mq_sysfs_unregister(q); | 2090 | blk_mq_sysfs_unregister(q); |
2082 | 2091 | ||
2083 | blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); | 2092 | blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask); |
2084 | 2093 | ||
2085 | /* | 2094 | /* |
2086 | * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe | 2095 | * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe |
@@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q) | |||
2088 | * involves freeing and re-allocating memory; worth doing?) | 2097 | * involves freeing and re-allocating memory; worth doing?) |
2089 | */ | 2098 | */ |
2090 | 2099 | ||
2091 | blk_mq_map_swqueue(q); | 2100 | blk_mq_map_swqueue(q, online_mask); |
2092 | 2101 | ||
2093 | blk_mq_sysfs_register(q); | 2102 | blk_mq_sysfs_register(q); |
2094 | } | 2103 | } |
@@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, | |||
2097 | unsigned long action, void *hcpu) | 2106 | unsigned long action, void *hcpu) |
2098 | { | 2107 | { |
2099 | struct request_queue *q; | 2108 | struct request_queue *q; |
2109 | int cpu = (unsigned long)hcpu; | ||
2110 | /* | ||
2111 | * New online cpumask which is going to be set in this hotplug event. | ||
2112 | * Declare this cpumask as global because cpu-hotplug operations are invoked | ||
2113 | * one by one, and dynamically allocating it could result in a failure. | ||
2114 | */ | ||
2115 | static struct cpumask online_new; | ||
2100 | 2116 | ||
2101 | /* | 2117 | /* |
2102 | * Before new mappings are established, hotadded cpu might already | 2118 | * Before hotadded cpu starts handling requests, new mappings must |
2103 | * start handling requests. This doesn't break anything as we map | 2119 | * be established. Otherwise, these requests in hw queue might |
2104 | * offline CPUs to first hardware queue. We will re-init the queue | 2120 | * never be dispatched. |
2105 | * below to get optimal settings. | 2121 | * |
2122 | * For example, there is a single hw queue (hctx) and two CPU queues | ||
2123 | * (ctx0 for CPU0, and ctx1 for CPU1). | ||
2124 | * | ||
2125 | * Now CPU1 is just onlined and a request is inserted into | ||
2126 | * ctx1->rq_list, and bit0 is set in the pending bitmap because | ||
2127 | * ctx1->index_hw is still zero. | ||
2128 | * | ||
2129 | * Then, while running the hw queue, flush_busy_ctxs() finds bit0 | ||
2130 | * set in the pending bitmap and tries to retrieve requests from | ||
2131 | * hctx->ctxs[0]->rq_list. But hctx->ctxs[0] is a pointer to ctx0, | ||
2132 | * so the request in ctx1->rq_list is ignored. | ||
2106 | */ | 2133 | */ |
2107 | if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && | 2134 | switch (action & ~CPU_TASKS_FROZEN) { |
2108 | action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) | 2135 | case CPU_DEAD: |
2136 | case CPU_UP_CANCELED: | ||
2137 | cpumask_copy(&online_new, cpu_online_mask); | ||
2138 | break; | ||
2139 | case CPU_UP_PREPARE: | ||
2140 | cpumask_copy(&online_new, cpu_online_mask); | ||
2141 | cpumask_set_cpu(cpu, &online_new); | ||
2142 | break; | ||
2143 | default: | ||
2109 | return NOTIFY_OK; | 2144 | return NOTIFY_OK; |
2145 | } | ||
2110 | 2146 | ||
2111 | mutex_lock(&all_q_mutex); | 2147 | mutex_lock(&all_q_mutex); |
2112 | 2148 | ||
@@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, | |||
2130 | } | 2166 | } |
2131 | 2167 | ||
2132 | list_for_each_entry(q, &all_q_list, all_q_node) | 2168 | list_for_each_entry(q, &all_q_list, all_q_node) |
2133 | blk_mq_queue_reinit(q); | 2169 | blk_mq_queue_reinit(q, &online_new); |
2134 | 2170 | ||
2135 | list_for_each_entry(q, &all_q_list, all_q_node) | 2171 | list_for_each_entry(q, &all_q_list, all_q_node) |
2136 | blk_mq_unfreeze_queue(q); | 2172 | blk_mq_unfreeze_queue(q); |
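For readers skimming the notifier hunk above, the control flow condenses to: build the CPU mask the remap should target, then freeze, remap and unfreeze every queue under all_q_mutex. A reduced sketch (freeze/unfreeze and the sysfs handling are elided; names follow the hunk):

    switch (action & ~CPU_TASKS_FROZEN) {
    case CPU_UP_PREPARE:                    /* CPU is about to come online */
            cpumask_copy(&online_new, cpu_online_mask);
            cpumask_set_cpu(cpu, &online_new);
            break;
    case CPU_DEAD:
    case CPU_UP_CANCELED:                   /* CPU went (or stayed) offline */
            cpumask_copy(&online_new, cpu_online_mask);
            break;
    default:
            return NOTIFY_OK;
    }

    list_for_each_entry(q, &all_q_list, all_q_node)
            blk_mq_queue_reinit(q, &online_new);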
diff --git a/block/blk-mq.h b/block/blk-mq.h index 6a48c4c0d8a2..f4fea7964910 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
@@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void); | |||
51 | * CPU -> queue mappings | 51 | * CPU -> queue mappings |
52 | */ | 52 | */ |
53 | extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); | 53 | extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); |
54 | extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); | 54 | extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, |
55 | const struct cpumask *online_mask); | ||
55 | extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); | 56 | extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); |
56 | 57 | ||
57 | /* | 58 | /* |
diff --git a/block/bounce.c b/block/bounce.c index 0611aea1cfe9..1cb5dd3a5da1 100644 --- a/block/bounce.c +++ b/block/bounce.c | |||
@@ -128,12 +128,14 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool) | |||
128 | struct bio *bio_orig = bio->bi_private; | 128 | struct bio *bio_orig = bio->bi_private; |
129 | struct bio_vec *bvec, *org_vec; | 129 | struct bio_vec *bvec, *org_vec; |
130 | int i; | 130 | int i; |
131 | int start = bio_orig->bi_iter.bi_idx; | ||
131 | 132 | ||
132 | /* | 133 | /* |
133 | * free up bounce indirect pages used | 134 | * free up bounce indirect pages used |
134 | */ | 135 | */ |
135 | bio_for_each_segment_all(bvec, bio, i) { | 136 | bio_for_each_segment_all(bvec, bio, i) { |
136 | org_vec = bio_orig->bi_io_vec + i; | 137 | org_vec = bio_orig->bi_io_vec + i + start; |
138 | |||
137 | if (bvec->bv_page == org_vec->bv_page) | 139 | if (bvec->bv_page == org_vec->bv_page) |
138 | continue; | 140 | continue; |
139 | 141 | ||
diff --git a/crypto/ahash.c b/crypto/ahash.c index 8acb886032ae..9c1dc8d6106a 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c | |||
@@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg) | |||
544 | struct crypto_alg *base = &alg->halg.base; | 544 | struct crypto_alg *base = &alg->halg.base; |
545 | 545 | ||
546 | if (alg->halg.digestsize > PAGE_SIZE / 8 || | 546 | if (alg->halg.digestsize > PAGE_SIZE / 8 || |
547 | alg->halg.statesize > PAGE_SIZE / 8) | 547 | alg->halg.statesize > PAGE_SIZE / 8 || |
548 | alg->halg.statesize == 0) | ||
548 | return -EINVAL; | 549 | return -EINVAL; |
549 | 550 | ||
550 | base->cra_type = &crypto_ahash_type; | 551 | base->cra_type = &crypto_ahash_type; |
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 6d88dd15c98d..197096632412 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c | |||
@@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) | |||
332 | srlen = cert->raw_serial_size; | 332 | srlen = cert->raw_serial_size; |
333 | q = cert->raw_serial; | 333 | q = cert->raw_serial; |
334 | } | 334 | } |
335 | if (srlen > 1 && *q == 0) { | ||
336 | srlen--; | ||
337 | q++; | ||
338 | } | ||
339 | 335 | ||
340 | ret = -ENOMEM; | 336 | ret = -ENOMEM; |
341 | desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); | 337 | desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 35c2de136971..fa18753f5c34 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -940,6 +940,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, | |||
940 | char *xbuf[XBUFSIZE]; | 940 | char *xbuf[XBUFSIZE]; |
941 | char *xoutbuf[XBUFSIZE]; | 941 | char *xoutbuf[XBUFSIZE]; |
942 | int ret = -ENOMEM; | 942 | int ret = -ENOMEM; |
943 | unsigned int ivsize = crypto_skcipher_ivsize(tfm); | ||
943 | 944 | ||
944 | if (testmgr_alloc_buf(xbuf)) | 945 | if (testmgr_alloc_buf(xbuf)) |
945 | goto out_nobuf; | 946 | goto out_nobuf; |
@@ -975,7 +976,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, | |||
975 | continue; | 976 | continue; |
976 | 977 | ||
977 | if (template[i].iv) | 978 | if (template[i].iv) |
978 | memcpy(iv, template[i].iv, MAX_IVLEN); | 979 | memcpy(iv, template[i].iv, ivsize); |
979 | else | 980 | else |
980 | memset(iv, 0, MAX_IVLEN); | 981 | memset(iv, 0, MAX_IVLEN); |
981 | 982 | ||
@@ -1051,7 +1052,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, | |||
1051 | continue; | 1052 | continue; |
1052 | 1053 | ||
1053 | if (template[i].iv) | 1054 | if (template[i].iv) |
1054 | memcpy(iv, template[i].iv, MAX_IVLEN); | 1055 | memcpy(iv, template[i].iv, ivsize); |
1055 | else | 1056 | else |
1056 | memset(iv, 0, MAX_IVLEN); | 1057 | memset(iv, 0, MAX_IVLEN); |
1057 | 1058 | ||
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 09f37b516808..4dde37c3d8fc 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h | |||
@@ -61,6 +61,7 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header); | |||
61 | ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX); | 61 | ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX); |
62 | ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX); | 62 | ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX); |
63 | ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX); | 63 | ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX); |
64 | ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX); | ||
64 | 65 | ||
65 | #if (!ACPI_REDUCED_HARDWARE) | 66 | #if (!ACPI_REDUCED_HARDWARE) |
66 | ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS); | 67 | ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS); |
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index f7731f260c31..591ea95319e2 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h | |||
@@ -85,7 +85,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded); | |||
85 | /* | 85 | /* |
86 | * tbfadt - FADT parse/convert/validate | 86 | * tbfadt - FADT parse/convert/validate |
87 | */ | 87 | */ |
88 | void acpi_tb_parse_fadt(u32 table_index); | 88 | void acpi_tb_parse_fadt(void); |
89 | 89 | ||
90 | void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length); | 90 | void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length); |
91 | 91 | ||
@@ -138,8 +138,6 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id); | |||
138 | */ | 138 | */ |
139 | acpi_status acpi_tb_initialize_facs(void); | 139 | acpi_status acpi_tb_initialize_facs(void); |
140 | 140 | ||
141 | u8 acpi_tb_tables_loaded(void); | ||
142 | |||
143 | void | 141 | void |
144 | acpi_tb_print_table_header(acpi_physical_address address, | 142 | acpi_tb_print_table_header(acpi_physical_address address, |
145 | struct acpi_table_header *header); | 143 | struct acpi_table_header *header); |
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index faad911d46b5..10ce48e16ebf 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c | |||
@@ -71,7 +71,7 @@ acpi_status acpi_enable(void) | |||
71 | 71 | ||
72 | /* ACPI tables must be present */ | 72 | /* ACPI tables must be present */ |
73 | 73 | ||
74 | if (!acpi_tb_tables_loaded()) { | 74 | if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) { |
75 | return_ACPI_STATUS(AE_NO_ACPI_TABLES); | 75 | return_ACPI_STATUS(AE_NO_ACPI_TABLES); |
76 | } | 76 | } |
77 | 77 | ||
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 455a0700db39..a6454f4a6fb3 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c | |||
@@ -298,7 +298,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64) | |||
298 | * | 298 | * |
299 | * FUNCTION: acpi_tb_parse_fadt | 299 | * FUNCTION: acpi_tb_parse_fadt |
300 | * | 300 | * |
301 | * PARAMETERS: table_index - Index for the FADT | 301 | * PARAMETERS: None |
302 | * | 302 | * |
303 | * RETURN: None | 303 | * RETURN: None |
304 | * | 304 | * |
@@ -307,7 +307,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64) | |||
307 | * | 307 | * |
308 | ******************************************************************************/ | 308 | ******************************************************************************/ |
309 | 309 | ||
310 | void acpi_tb_parse_fadt(u32 table_index) | 310 | void acpi_tb_parse_fadt(void) |
311 | { | 311 | { |
312 | u32 length; | 312 | u32 length; |
313 | struct acpi_table_header *table; | 313 | struct acpi_table_header *table; |
@@ -319,11 +319,11 @@ void acpi_tb_parse_fadt(u32 table_index) | |||
319 | * Get a local copy of the FADT and convert it to a common format | 319 | * Get a local copy of the FADT and convert it to a common format |
320 | * Map entire FADT, assumed to be smaller than one page. | 320 | * Map entire FADT, assumed to be smaller than one page. |
321 | */ | 321 | */ |
322 | length = acpi_gbl_root_table_list.tables[table_index].length; | 322 | length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length; |
323 | 323 | ||
324 | table = | 324 | table = |
325 | acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index]. | 325 | acpi_os_map_memory(acpi_gbl_root_table_list. |
326 | address, length); | 326 | tables[acpi_gbl_fadt_index].address, length); |
327 | if (!table) { | 327 | if (!table) { |
328 | return; | 328 | return; |
329 | } | 329 | } |
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 4337990127cc..d8ddef38c947 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c | |||
@@ -99,29 +99,6 @@ acpi_status acpi_tb_initialize_facs(void) | |||
99 | 99 | ||
100 | /******************************************************************************* | 100 | /******************************************************************************* |
101 | * | 101 | * |
102 | * FUNCTION: acpi_tb_tables_loaded | ||
103 | * | ||
104 | * PARAMETERS: None | ||
105 | * | ||
106 | * RETURN: TRUE if required ACPI tables are loaded | ||
107 | * | ||
108 | * DESCRIPTION: Determine if the minimum required ACPI tables are present | ||
109 | * (FADT, FACS, DSDT) | ||
110 | * | ||
111 | ******************************************************************************/ | ||
112 | |||
113 | u8 acpi_tb_tables_loaded(void) | ||
114 | { | ||
115 | |||
116 | if (acpi_gbl_root_table_list.current_table_count >= 4) { | ||
117 | return (TRUE); | ||
118 | } | ||
119 | |||
120 | return (FALSE); | ||
121 | } | ||
122 | |||
123 | /******************************************************************************* | ||
124 | * | ||
125 | * FUNCTION: acpi_tb_check_dsdt_header | 102 | * FUNCTION: acpi_tb_check_dsdt_header |
126 | * | 103 | * |
127 | * PARAMETERS: None | 104 | * PARAMETERS: None |
@@ -392,7 +369,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) | |||
392 | ACPI_COMPARE_NAME(&acpi_gbl_root_table_list. | 369 | ACPI_COMPARE_NAME(&acpi_gbl_root_table_list. |
393 | tables[table_index].signature, | 370 | tables[table_index].signature, |
394 | ACPI_SIG_FADT)) { | 371 | ACPI_SIG_FADT)) { |
395 | acpi_tb_parse_fadt(table_index); | 372 | acpi_gbl_fadt_index = table_index; |
373 | acpi_tb_parse_fadt(); | ||
396 | } | 374 | } |
397 | 375 | ||
398 | next_table: | 376 | next_table: |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 46506e7687cd..a212cefae524 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -315,14 +315,10 @@ static void acpi_bus_osc_support(void) | |||
315 | 315 | ||
316 | capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; | 316 | capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; |
317 | capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */ | 317 | capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */ |
318 | #if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\ | 318 | if (IS_ENABLED(CONFIG_ACPI_PROCESSOR_AGGREGATOR)) |
319 | defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) | 319 | capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; |
320 | capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; | 320 | if (IS_ENABLED(CONFIG_ACPI_PROCESSOR)) |
321 | #endif | 321 | capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; |
322 | |||
323 | #if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) | ||
324 | capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; | ||
325 | #endif | ||
326 | 322 | ||
327 | capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; | 323 | capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; |
328 | 324 | ||
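This hunk, and the int340x_thermal one further down, replace "#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)" blocks with IS_ENABLED(), which is true for both built-in (=y) and modular (=m) options while keeping the guarded code visible to the compiler. A generic sketch of the idiom (CONFIG_FOO and do_foo() are placeholders):

    /* preprocessor style: the code is not even parsed when the option is off */
    #if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
            do_foo();
    #endif

    /* IS_ENABLED() style: same run-time effect, always type-checked,
     * and the dead branch is dropped by the optimizer
     */
    if (IS_ENABLED(CONFIG_FOO))
            do_foo();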
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 2614a839c60d..42c66b64c12c 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) | |||
1044 | goto err_exit; | 1044 | goto err_exit; |
1045 | 1045 | ||
1046 | mutex_lock(&ec->mutex); | 1046 | mutex_lock(&ec->mutex); |
1047 | result = -ENODATA; | ||
1047 | list_for_each_entry(handler, &ec->list, node) { | 1048 | list_for_each_entry(handler, &ec->list, node) { |
1048 | if (value == handler->query_bit) { | 1049 | if (value == handler->query_bit) { |
1050 | result = 0; | ||
1049 | q->handler = acpi_ec_get_query_handler(handler); | 1051 | q->handler = acpi_ec_get_query_handler(handler); |
1050 | ec_dbg_evt("Query(0x%02x) scheduled", | 1052 | ec_dbg_evt("Query(0x%02x) scheduled", |
1051 | q->handler->query_bit); | 1053 | q->handler->query_bit); |
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c index 9dcf83682e36..33505c651f62 100644 --- a/drivers/acpi/int340x_thermal.c +++ b/drivers/acpi/int340x_thermal.c | |||
@@ -33,13 +33,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = { | |||
33 | static int int340x_thermal_handler_attach(struct acpi_device *adev, | 33 | static int int340x_thermal_handler_attach(struct acpi_device *adev, |
34 | const struct acpi_device_id *id) | 34 | const struct acpi_device_id *id) |
35 | { | 35 | { |
36 | #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) | 36 | if (IS_ENABLED(CONFIG_INT340X_THERMAL)) |
37 | acpi_create_platform_device(adev); | 37 | acpi_create_platform_device(adev); |
38 | #elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE) | ||
39 | /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ | 38 | /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ |
40 | if (id->driver_data == INT3401_DEVICE) | 39 | else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) && |
40 | id->driver_data == INT3401_DEVICE) | ||
41 | acpi_create_platform_device(adev); | 41 | acpi_create_platform_device(adev); |
42 | #endif | ||
43 | return 1; | 42 | return 1; |
44 | } | 43 | } |
45 | 44 | ||
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 6da0f9beab19..c9336751e5e3 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev) | |||
372 | 372 | ||
373 | /* Interrupt Line values above 0xF are forbidden */ | 373 | /* Interrupt Line values above 0xF are forbidden */ |
374 | if (dev->irq > 0 && (dev->irq <= 0xF) && | 374 | if (dev->irq > 0 && (dev->irq <= 0xF) && |
375 | acpi_isa_irq_available(dev->irq) && | ||
375 | (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { | 376 | (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { |
376 | dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", | 377 | dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", |
377 | pin_name(dev->pin), dev->irq); | 378 | pin_name(dev->pin), dev->irq); |
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 3b4ea98e3ea0..7c8408b946ca 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c | |||
@@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void) | |||
498 | PIRQ_PENALTY_PCI_POSSIBLE; | 498 | PIRQ_PENALTY_PCI_POSSIBLE; |
499 | } | 499 | } |
500 | } | 500 | } |
501 | /* Add a penalty for the SCI */ | 501 | |
502 | acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING; | ||
503 | return 0; | 502 | return 0; |
504 | } | 503 | } |
505 | 504 | ||
@@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) | |||
553 | irq = link->irq.possible[i]; | 552 | irq = link->irq.possible[i]; |
554 | } | 553 | } |
555 | } | 554 | } |
555 | if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) { | ||
556 | printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " | ||
557 | "Try pci=noacpi or acpi=off\n", | ||
558 | acpi_device_name(link->device), | ||
559 | acpi_device_bid(link->device)); | ||
560 | return -ENODEV; | ||
561 | } | ||
556 | 562 | ||
557 | /* Attempt to enable the link device at this IRQ. */ | 563 | /* Attempt to enable the link device at this IRQ. */ |
558 | if (acpi_pci_link_set(link, irq)) { | 564 | if (acpi_pci_link_set(link, irq)) { |
@@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active) | |||
821 | } | 827 | } |
822 | } | 828 | } |
823 | 829 | ||
830 | bool acpi_isa_irq_available(int irq) | ||
831 | { | ||
832 | return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) || | ||
833 | acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS); | ||
834 | } | ||
835 | |||
824 | /* | 836 | /* |
825 | * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with | 837 | * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with |
826 | * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be used for | 838 | * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be used for |
diff --git a/drivers/atm/he.c b/drivers/atm/he.c index a8da3a50e374..0f5cb37636bc 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c | |||
@@ -1578,9 +1578,7 @@ he_stop(struct he_dev *he_dev) | |||
1578 | 1578 | ||
1579 | kfree(he_dev->rbpl_virt); | 1579 | kfree(he_dev->rbpl_virt); |
1580 | kfree(he_dev->rbpl_table); | 1580 | kfree(he_dev->rbpl_table); |
1581 | 1581 | dma_pool_destroy(he_dev->rbpl_pool); | |
1582 | if (he_dev->rbpl_pool) | ||
1583 | dma_pool_destroy(he_dev->rbpl_pool); | ||
1584 | 1582 | ||
1585 | if (he_dev->rbrq_base) | 1583 | if (he_dev->rbrq_base) |
1586 | dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), | 1584 | dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), |
@@ -1594,8 +1592,7 @@ he_stop(struct he_dev *he_dev) | |||
1594 | dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), | 1592 | dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), |
1595 | he_dev->tpdrq_base, he_dev->tpdrq_phys); | 1593 | he_dev->tpdrq_base, he_dev->tpdrq_phys); |
1596 | 1594 | ||
1597 | if (he_dev->tpd_pool) | 1595 | dma_pool_destroy(he_dev->tpd_pool); |
1598 | dma_pool_destroy(he_dev->tpd_pool); | ||
1599 | 1596 | ||
1600 | if (he_dev->pci_dev) { | 1597 | if (he_dev->pci_dev) { |
1601 | pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command); | 1598 | pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command); |
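The two hunks above drop the explicit NULL checks because dma_pool_destroy() tolerates a NULL pointer, much like kfree(NULL) is a no-op (assumption: this cleanup relies on that NULL-safe behaviour). Sketch:

    /* before */
    if (he_dev->rbpl_pool)
            dma_pool_destroy(he_dev->rbpl_pool);

    /* after: equivalent, since dma_pool_destroy(NULL) does nothing */
    dma_pool_destroy(he_dev->rbpl_pool);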
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 74e18b0a6d89..3d7fb6516f74 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c | |||
@@ -805,7 +805,12 @@ static void solos_bh(unsigned long card_arg) | |||
805 | continue; | 805 | continue; |
806 | } | 806 | } |
807 | 807 | ||
808 | skb = alloc_skb(size + 1, GFP_ATOMIC); | 808 | /* Use netdev_alloc_skb() because it adds NET_SKB_PAD of |
809 | * headroom, and ensures we can route packets back out an | ||
810 | * Ethernet interface (for example) without having to | ||
811 | * reallocate. Adding NET_IP_ALIGN also ensures that both | ||
812 | * PPPoATM and PPPoEoBR2684 packets end up aligned. */ | ||
813 | skb = netdev_alloc_skb_ip_align(NULL, size + 1); | ||
809 | if (!skb) { | 814 | if (!skb) { |
810 | if (net_ratelimit()) | 815 | if (net_ratelimit()) |
811 | dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n"); | 816 | dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n"); |
@@ -869,7 +874,10 @@ static void solos_bh(unsigned long card_arg) | |||
869 | /* Allocate RX skbs for any ports which need them */ | 874 | /* Allocate RX skbs for any ports which need them */ |
870 | if (card->using_dma && card->atmdev[port] && | 875 | if (card->using_dma && card->atmdev[port] && |
871 | !card->rx_skb[port]) { | 876 | !card->rx_skb[port]) { |
872 | struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC); | 877 | /* Unlike the MMIO case (qv) we can't add NET_IP_ALIGN |
878 | * here; the FPGA can only DMA to addresses which are | ||
879 | * aligned to 4 bytes. */ | ||
880 | struct sk_buff *skb = dev_alloc_skb(RX_DMA_SIZE); | ||
873 | if (skb) { | 881 | if (skb) { |
874 | SKB_CB(skb)->dma_addr = | 882 | SKB_CB(skb)->dma_addr = |
875 | dma_map_single(&card->dev->dev, skb->data, | 883 | dma_map_single(&card->dev->dev, skb->data, |
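netdev_alloc_skb_ip_align() reserves NET_SKB_PAD bytes of headroom plus NET_IP_ALIGN, so the stack can prepend headers (or fix up IP alignment) without reallocating. A rough equivalence sketch, assuming the usual definitions in <linux/skbuff.h>; the real helper also uses the per-CPU page-frag allocator:

    /* approximately what netdev_alloc_skb_ip_align(NULL, len) provides */
    struct sk_buff *skb = alloc_skb(len + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
    if (skb)
            skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);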
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 764280a91776..e9fd32e91668 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c | |||
@@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu) | |||
148 | 148 | ||
149 | if (sibling == cpu) /* skip itself */ | 149 | if (sibling == cpu) /* skip itself */ |
150 | continue; | 150 | continue; |
151 | |||
151 | sib_cpu_ci = get_cpu_cacheinfo(sibling); | 152 | sib_cpu_ci = get_cpu_cacheinfo(sibling); |
153 | if (!sib_cpu_ci->info_list) | ||
154 | continue; | ||
155 | |||
152 | sib_leaf = sib_cpu_ci->info_list + index; | 156 | sib_leaf = sib_cpu_ci->info_list + index; |
153 | cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); | 157 | cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); |
154 | cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); | 158 | cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); |
@@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu) | |||
159 | 163 | ||
160 | static void free_cache_attributes(unsigned int cpu) | 164 | static void free_cache_attributes(unsigned int cpu) |
161 | { | 165 | { |
166 | if (!per_cpu_cacheinfo(cpu)) | ||
167 | return; | ||
168 | |||
162 | cache_shared_cpu_map_remove(cpu); | 169 | cache_shared_cpu_map_remove(cpu); |
163 | 170 | ||
164 | kfree(per_cpu_cacheinfo(cpu)); | 171 | kfree(per_cpu_cacheinfo(cpu)); |
@@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb, | |||
514 | break; | 521 | break; |
515 | case CPU_DEAD: | 522 | case CPU_DEAD: |
516 | cache_remove_dev(cpu); | 523 | cache_remove_dev(cpu); |
517 | if (per_cpu_cacheinfo(cpu)) | 524 | free_cache_attributes(cpu); |
518 | free_cache_attributes(cpu); | ||
519 | break; | 525 | break; |
520 | } | 526 | } |
521 | return notifier_from_errno(rc); | 527 | return notifier_from_errno(rc); |
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 1857a5dd0816..134483daac25 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c | |||
@@ -63,20 +63,8 @@ static int platform_msi_init(struct irq_domain *domain, | |||
63 | unsigned int virq, irq_hw_number_t hwirq, | 63 | unsigned int virq, irq_hw_number_t hwirq, |
64 | msi_alloc_info_t *arg) | 64 | msi_alloc_info_t *arg) |
65 | { | 65 | { |
66 | struct irq_data *data; | 66 | return irq_domain_set_hwirq_and_chip(domain, virq, hwirq, |
67 | 67 | info->chip, info->chip_data); | |
68 | irq_domain_set_hwirq_and_chip(domain, virq, hwirq, | ||
69 | info->chip, info->chip_data); | ||
70 | |||
71 | /* | ||
72 | * Save the MSI descriptor in handler_data so that the | ||
73 | * irq_write_msi_msg callback can retrieve it (and the | ||
74 | * associated device). | ||
75 | */ | ||
76 | data = irq_domain_get_irq_data(domain, virq); | ||
77 | data->handler_data = arg->desc; | ||
78 | |||
79 | return 0; | ||
80 | } | 68 | } |
81 | #else | 69 | #else |
82 | #define platform_msi_set_desc NULL | 70 | #define platform_msi_set_desc NULL |
@@ -97,7 +85,7 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info) | |||
97 | 85 | ||
98 | static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg) | 86 | static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg) |
99 | { | 87 | { |
100 | struct msi_desc *desc = irq_data_get_irq_handler_data(data); | 88 | struct msi_desc *desc = irq_data_get_msi_desc(data); |
101 | struct platform_msi_priv_data *priv_data; | 89 | struct platform_msi_priv_data *priv_data; |
102 | 90 | ||
103 | priv_data = desc->platform.msi_priv_data; | 91 | priv_data = desc->platform.msi_priv_data; |
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 2a4154a09e4d..85e17bacc834 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c | |||
@@ -77,13 +77,16 @@ static bool default_stop_ok(struct device *dev) | |||
77 | dev_update_qos_constraint); | 77 | dev_update_qos_constraint); |
78 | 78 | ||
79 | if (constraint_ns > 0) { | 79 | if (constraint_ns > 0) { |
80 | constraint_ns -= td->start_latency_ns; | 80 | constraint_ns -= td->save_state_latency_ns + |
81 | td->stop_latency_ns + | ||
82 | td->start_latency_ns + | ||
83 | td->restore_state_latency_ns; | ||
81 | if (constraint_ns == 0) | 84 | if (constraint_ns == 0) |
82 | return false; | 85 | return false; |
83 | } | 86 | } |
84 | td->effective_constraint_ns = constraint_ns; | 87 | td->effective_constraint_ns = constraint_ns; |
85 | td->cached_stop_ok = constraint_ns > td->stop_latency_ns || | 88 | td->cached_stop_ok = constraint_ns >= 0; |
86 | constraint_ns == 0; | 89 | |
87 | /* | 90 | /* |
88 | * The children have been suspended already, so we don't need to take | 91 | * The children have been suspended already, so we don't need to take |
89 | * their stop latencies into account here. | 92 | * their stop latencies into account here. |
@@ -126,18 +129,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) | |||
126 | 129 | ||
127 | off_on_time_ns = genpd->power_off_latency_ns + | 130 | off_on_time_ns = genpd->power_off_latency_ns + |
128 | genpd->power_on_latency_ns; | 131 | genpd->power_on_latency_ns; |
129 | /* | ||
130 | * It doesn't make sense to remove power from the domain if saving | ||
131 | * the state of all devices in it and the power off/power on operations | ||
132 | * take too much time. | ||
133 | * | ||
134 | * All devices in this domain have been stopped already at this point. | ||
135 | */ | ||
136 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { | ||
137 | if (pdd->dev->driver) | ||
138 | off_on_time_ns += | ||
139 | to_gpd_data(pdd)->td.save_state_latency_ns; | ||
140 | } | ||
141 | 132 | ||
142 | min_off_time_ns = -1; | 133 | min_off_time_ns = -1; |
143 | /* | 134 | /* |
@@ -193,7 +184,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) | |||
193 | * constraint_ns cannot be negative here, because the device has | 184 | * constraint_ns cannot be negative here, because the device has |
194 | * been suspended. | 185 | * been suspended. |
195 | */ | 186 | */ |
196 | constraint_ns -= td->restore_state_latency_ns; | ||
197 | if (constraint_ns <= off_on_time_ns) | 187 | if (constraint_ns <= off_on_time_ns) |
198 | return false; | 188 | return false; |
199 | 189 | ||
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 28cd75c535b0..7ae7cd990fbf 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
@@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) | |||
892 | u32 microvolt[3] = {0}; | 892 | u32 microvolt[3] = {0}; |
893 | int count, ret; | 893 | int count, ret; |
894 | 894 | ||
895 | count = of_property_count_u32_elems(opp->np, "opp-microvolt"); | 895 | /* Missing property isn't a problem, but an invalid entry is */ |
896 | if (!count) | 896 | if (!of_find_property(opp->np, "opp-microvolt", NULL)) |
897 | return 0; | 897 | return 0; |
898 | 898 | ||
899 | count = of_property_count_u32_elems(opp->np, "opp-microvolt"); | ||
900 | if (count < 0) { | ||
901 | dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", | ||
902 | __func__, count); | ||
903 | return count; | ||
904 | } | ||
905 | |||
899 | /* There can be one or three elements here */ | 906 | /* There can be one or three elements here */ |
900 | if (count != 1 && count != 3) { | 907 | if (count != 1 && count != 3) { |
901 | dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", | 908 | dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", |
@@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add); | |||
1063 | * share a common logic which is isolated here. | 1070 | * share a common logic which is isolated here. |
1064 | * | 1071 | * |
1065 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | 1072 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
1066 | * copy operation, returns 0 if no modifcation was done OR modification was | 1073 | * copy operation, returns 0 if no modification was done OR modification was |
1067 | * successful. | 1074 | * successful. |
1068 | * | 1075 | * |
1069 | * Locking: The internal device_opp and opp structures are RCU protected. | 1076 | * Locking: The internal device_opp and opp structures are RCU protected. |
@@ -1151,7 +1158,7 @@ unlock: | |||
1151 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 1158 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
1152 | * | 1159 | * |
1153 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | 1160 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
1154 | * copy operation, returns 0 if no modifcation was done OR modification was | 1161 | * copy operation, returns 0 if no modification was done OR modification was |
1155 | * successful. | 1162 | * successful. |
1156 | */ | 1163 | */ |
1157 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) | 1164 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) |
@@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); | |||
1177 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 1184 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
1178 | * | 1185 | * |
1179 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | 1186 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
1180 | * copy operation, returns 0 if no modifcation was done OR modification was | 1187 | * copy operation, returns 0 if no modification was done OR modification was |
1181 | * successful. | 1188 | * successful. |
1182 | */ | 1189 | */ |
1183 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) | 1190 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) |
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index f42f2bac6466..4c55cfbad19e 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c | |||
@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock); | |||
32 | /* Calculate the length of a fixed format */ | 32 | /* Calculate the length of a fixed format */ |
33 | static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) | 33 | static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) |
34 | { | 34 | { |
35 | snprintf(buf, buf_size, "%x", max_val); | 35 | return snprintf(NULL, 0, "%x", max_val); |
36 | return strlen(buf); | ||
37 | } | 36 | } |
38 | 37 | ||
39 | static ssize_t regmap_name_read_file(struct file *file, | 38 | static ssize_t regmap_name_read_file(struct file *file, |
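The rewrite above leans on the C99 behaviour of snprintf(): with a NULL buffer and size 0 it writes nothing and returns the number of characters the formatted output would need, so the scratch buffer and the extra strlen() disappear. A tiny illustration:

    size_t len = snprintf(NULL, 0, "%x", 0x3fff);   /* len == 4, nothing written */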
@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file, | |||
432 | /* If we're in the region the user is trying to read */ | 431 | /* If we're in the region the user is trying to read */ |
433 | if (p >= *ppos) { | 432 | if (p >= *ppos) { |
434 | /* ...but not beyond it */ | 433 | /* ...but not beyond it */ |
435 | if (buf_pos >= count - 1 - tot_len) | 434 | if (buf_pos + tot_len + 1 >= count) |
436 | break; | 435 | break; |
437 | 436 | ||
438 | /* Format the register */ | 437 | /* Format the register */ |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f9889b6bc02c..674f800a3b57 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd) | |||
1486 | { | 1486 | { |
1487 | const bool write = cmd->rq->cmd_flags & REQ_WRITE; | 1487 | const bool write = cmd->rq->cmd_flags & REQ_WRITE; |
1488 | struct loop_device *lo = cmd->rq->q->queuedata; | 1488 | struct loop_device *lo = cmd->rq->q->queuedata; |
1489 | int ret = -EIO; | 1489 | int ret = 0; |
1490 | 1490 | ||
1491 | if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) | 1491 | if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { |
1492 | ret = -EIO; | ||
1492 | goto failed; | 1493 | goto failed; |
1494 | } | ||
1493 | 1495 | ||
1494 | ret = do_req_filebacked(lo, cmd->rq); | 1496 | ret = do_req_filebacked(lo, cmd->rq); |
1495 | |||
1496 | failed: | 1497 | failed: |
1497 | if (ret) | 1498 | blk_mq_complete_request(cmd->rq, ret ? -EIO : 0); |
1498 | cmd->rq->errors = -EIO; | ||
1499 | blk_mq_complete_request(cmd->rq); | ||
1500 | } | 1499 | } |
1501 | 1500 | ||
1502 | static void loop_queue_write_work(struct work_struct *work) | 1501 | static void loop_queue_write_work(struct work_struct *work) |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 17269a3b85f2..1c9e4fe5aa44 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) | |||
289 | case NULL_IRQ_SOFTIRQ: | 289 | case NULL_IRQ_SOFTIRQ: |
290 | switch (queue_mode) { | 290 | switch (queue_mode) { |
291 | case NULL_Q_MQ: | 291 | case NULL_Q_MQ: |
292 | blk_mq_complete_request(cmd->rq); | 292 | blk_mq_complete_request(cmd->rq, cmd->rq->errors); |
293 | break; | 293 | break; |
294 | case NULL_Q_RQ: | 294 | case NULL_Q_RQ: |
295 | blk_complete_request(cmd->rq); | 295 | blk_complete_request(cmd->rq); |
@@ -406,6 +406,22 @@ static struct blk_mq_ops null_mq_ops = { | |||
406 | .complete = null_softirq_done_fn, | 406 | .complete = null_softirq_done_fn, |
407 | }; | 407 | }; |
408 | 408 | ||
409 | static void cleanup_queue(struct nullb_queue *nq) | ||
410 | { | ||
411 | kfree(nq->tag_map); | ||
412 | kfree(nq->cmds); | ||
413 | } | ||
414 | |||
415 | static void cleanup_queues(struct nullb *nullb) | ||
416 | { | ||
417 | int i; | ||
418 | |||
419 | for (i = 0; i < nullb->nr_queues; i++) | ||
420 | cleanup_queue(&nullb->queues[i]); | ||
421 | |||
422 | kfree(nullb->queues); | ||
423 | } | ||
424 | |||
409 | static void null_del_dev(struct nullb *nullb) | 425 | static void null_del_dev(struct nullb *nullb) |
410 | { | 426 | { |
411 | list_del_init(&nullb->list); | 427 | list_del_init(&nullb->list); |
@@ -415,6 +431,7 @@ static void null_del_dev(struct nullb *nullb) | |||
415 | if (queue_mode == NULL_Q_MQ) | 431 | if (queue_mode == NULL_Q_MQ) |
416 | blk_mq_free_tag_set(&nullb->tag_set); | 432 | blk_mq_free_tag_set(&nullb->tag_set); |
417 | put_disk(nullb->disk); | 433 | put_disk(nullb->disk); |
434 | cleanup_queues(nullb); | ||
418 | kfree(nullb); | 435 | kfree(nullb); |
419 | } | 436 | } |
420 | 437 | ||
@@ -459,22 +476,6 @@ static int setup_commands(struct nullb_queue *nq) | |||
459 | return 0; | 476 | return 0; |
460 | } | 477 | } |
461 | 478 | ||
462 | static void cleanup_queue(struct nullb_queue *nq) | ||
463 | { | ||
464 | kfree(nq->tag_map); | ||
465 | kfree(nq->cmds); | ||
466 | } | ||
467 | |||
468 | static void cleanup_queues(struct nullb *nullb) | ||
469 | { | ||
470 | int i; | ||
471 | |||
472 | for (i = 0; i < nullb->nr_queues; i++) | ||
473 | cleanup_queue(&nullb->queues[i]); | ||
474 | |||
475 | kfree(nullb->queues); | ||
476 | } | ||
477 | |||
478 | static int setup_queues(struct nullb *nullb) | 479 | static int setup_queues(struct nullb *nullb) |
479 | { | 480 | { |
480 | nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), | 481 | nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), |
@@ -588,8 +589,7 @@ static int null_add_dev(void) | |||
588 | blk_queue_physical_block_size(nullb->q, bs); | 589 | blk_queue_physical_block_size(nullb->q, bs); |
589 | 590 | ||
590 | size = gb * 1024 * 1024 * 1024ULL; | 591 | size = gb * 1024 * 1024 * 1024ULL; |
591 | sector_div(size, bs); | 592 | set_capacity(disk, size >> 9); |
592 | set_capacity(disk, size); | ||
593 | 593 | ||
594 | disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; | 594 | disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; |
595 | disk->major = null_major; | 595 | disk->major = null_major; |
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b97fc3fe0916..6f04771f1019 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c | |||
@@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, | |||
618 | spin_unlock_irqrestore(req->q->queue_lock, flags); | 618 | spin_unlock_irqrestore(req->q->queue_lock, flags); |
619 | return; | 619 | return; |
620 | } | 620 | } |
621 | |||
621 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) { | 622 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) { |
622 | if (cmd_rq->ctx == CMD_CTX_CANCELLED) | 623 | if (cmd_rq->ctx == CMD_CTX_CANCELLED) |
623 | req->errors = -EINTR; | 624 | status = -EINTR; |
624 | else | ||
625 | req->errors = status; | ||
626 | } else { | 625 | } else { |
627 | req->errors = nvme_error_status(status); | 626 | status = nvme_error_status(status); |
628 | } | 627 | } |
629 | } else | 628 | } |
630 | req->errors = 0; | 629 | |
631 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) { | 630 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) { |
632 | u32 result = le32_to_cpup(&cqe->result); | 631 | u32 result = le32_to_cpup(&cqe->result); |
633 | req->special = (void *)(uintptr_t)result; | 632 | req->special = (void *)(uintptr_t)result; |
@@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, | |||
650 | } | 649 | } |
651 | nvme_free_iod(nvmeq->dev, iod); | 650 | nvme_free_iod(nvmeq->dev, iod); |
652 | 651 | ||
653 | blk_mq_complete_request(req); | 652 | blk_mq_complete_request(req, status); |
654 | } | 653 | } |
655 | 654 | ||
656 | /* length is in bytes. gfp flags indicates whether we may sleep. */ | 655 | /* length is in bytes. gfp flags indicates whether we may sleep. */ |
@@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
863 | if (ns && ns->ms && !blk_integrity_rq(req)) { | 862 | if (ns && ns->ms && !blk_integrity_rq(req)) { |
864 | if (!(ns->pi_type && ns->ms == 8) && | 863 | if (!(ns->pi_type && ns->ms == 8) && |
865 | req->cmd_type != REQ_TYPE_DRV_PRIV) { | 864 | req->cmd_type != REQ_TYPE_DRV_PRIV) { |
866 | req->errors = -EFAULT; | 865 | blk_mq_complete_request(req, -EFAULT); |
867 | blk_mq_complete_request(req); | ||
868 | return BLK_MQ_RQ_QUEUE_OK; | 866 | return BLK_MQ_RQ_QUEUE_OK; |
869 | } | 867 | } |
870 | } | 868 | } |
@@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn) | |||
2439 | list_sort(NULL, &dev->namespaces, ns_cmp); | 2437 | list_sort(NULL, &dev->namespaces, ns_cmp); |
2440 | } | 2438 | } |
2441 | 2439 | ||
2440 | static void nvme_set_irq_hints(struct nvme_dev *dev) | ||
2441 | { | ||
2442 | struct nvme_queue *nvmeq; | ||
2443 | int i; | ||
2444 | |||
2445 | for (i = 0; i < dev->online_queues; i++) { | ||
2446 | nvmeq = dev->queues[i]; | ||
2447 | |||
2448 | if (!nvmeq->tags || !(*nvmeq->tags)) | ||
2449 | continue; | ||
2450 | |||
2451 | irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, | ||
2452 | blk_mq_tags_cpumask(*nvmeq->tags)); | ||
2453 | } | ||
2454 | } | ||
2455 | |||
2442 | static void nvme_dev_scan(struct work_struct *work) | 2456 | static void nvme_dev_scan(struct work_struct *work) |
2443 | { | 2457 | { |
2444 | struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); | 2458 | struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); |
@@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work) | |||
2450 | return; | 2464 | return; |
2451 | nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); | 2465 | nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); |
2452 | kfree(ctrl); | 2466 | kfree(ctrl); |
2467 | nvme_set_irq_hints(dev); | ||
2453 | } | 2468 | } |
2454 | 2469 | ||
2455 | /* | 2470 | /* |
@@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = { | |||
2953 | .compat_ioctl = nvme_dev_ioctl, | 2968 | .compat_ioctl = nvme_dev_ioctl, |
2954 | }; | 2969 | }; |
2955 | 2970 | ||
2956 | static void nvme_set_irq_hints(struct nvme_dev *dev) | ||
2957 | { | ||
2958 | struct nvme_queue *nvmeq; | ||
2959 | int i; | ||
2960 | |||
2961 | for (i = 0; i < dev->online_queues; i++) { | ||
2962 | nvmeq = dev->queues[i]; | ||
2963 | |||
2964 | if (!nvmeq->tags || !(*nvmeq->tags)) | ||
2965 | continue; | ||
2966 | |||
2967 | irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, | ||
2968 | blk_mq_tags_cpumask(*nvmeq->tags)); | ||
2969 | } | ||
2970 | } | ||
2971 | |||
2972 | static int nvme_dev_start(struct nvme_dev *dev) | 2971 | static int nvme_dev_start(struct nvme_dev *dev) |
2973 | { | 2972 | { |
2974 | int result; | 2973 | int result; |
@@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev) | |||
3010 | if (result) | 3009 | if (result) |
3011 | goto free_tags; | 3010 | goto free_tags; |
3012 | 3011 | ||
3013 | nvme_set_irq_hints(dev); | ||
3014 | |||
3015 | dev->event_limit = 1; | 3012 | dev->event_limit = 1; |
3016 | return result; | 3013 | return result; |
3017 | 3014 | ||
@@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev) | |||
3062 | } else { | 3059 | } else { |
3063 | nvme_unfreeze_queues(dev); | 3060 | nvme_unfreeze_queues(dev); |
3064 | nvme_dev_add(dev); | 3061 | nvme_dev_add(dev); |
3065 | nvme_set_irq_hints(dev); | ||
3066 | } | 3062 | } |
3067 | return 0; | 3063 | return 0; |
3068 | } | 3064 | } |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d93a0372b37b..f5e49b639818 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -1863,9 +1863,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, | |||
1863 | rbd_osd_read_callback(obj_request); | 1863 | rbd_osd_read_callback(obj_request); |
1864 | break; | 1864 | break; |
1865 | case CEPH_OSD_OP_SETALLOCHINT: | 1865 | case CEPH_OSD_OP_SETALLOCHINT: |
1866 | rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE); | 1866 | rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE || |
1867 | osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL); | ||
1867 | /* fall through */ | 1868 | /* fall through */ |
1868 | case CEPH_OSD_OP_WRITE: | 1869 | case CEPH_OSD_OP_WRITE: |
1870 | case CEPH_OSD_OP_WRITEFULL: | ||
1869 | rbd_osd_write_callback(obj_request); | 1871 | rbd_osd_write_callback(obj_request); |
1870 | break; | 1872 | break; |
1871 | case CEPH_OSD_OP_STAT: | 1873 | case CEPH_OSD_OP_STAT: |
@@ -2401,7 +2403,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request, | |||
2401 | opcode = CEPH_OSD_OP_ZERO; | 2403 | opcode = CEPH_OSD_OP_ZERO; |
2402 | } | 2404 | } |
2403 | } else if (op_type == OBJ_OP_WRITE) { | 2405 | } else if (op_type == OBJ_OP_WRITE) { |
2404 | opcode = CEPH_OSD_OP_WRITE; | 2406 | if (!offset && length == object_size) |
2407 | opcode = CEPH_OSD_OP_WRITEFULL; | ||
2408 | else | ||
2409 | opcode = CEPH_OSD_OP_WRITE; | ||
2405 | osd_req_op_alloc_hint_init(osd_request, num_ops, | 2410 | osd_req_op_alloc_hint_init(osd_request, num_ops, |
2406 | object_size, object_size); | 2411 | object_size, object_size); |
2407 | num_ops++; | 2412 | num_ops++; |
@@ -3760,6 +3765,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) | |||
3760 | /* set io sizes to object size */ | 3765 | /* set io sizes to object size */ |
3761 | segment_size = rbd_obj_bytes(&rbd_dev->header); | 3766 | segment_size = rbd_obj_bytes(&rbd_dev->header); |
3762 | blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); | 3767 | blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); |
3768 | q->limits.max_sectors = queue_max_hw_sectors(q); | ||
3763 | blk_queue_max_segments(q, segment_size / SECTOR_SIZE); | 3769 | blk_queue_max_segments(q, segment_size / SECTOR_SIZE); |
3764 | blk_queue_max_segment_size(q, segment_size); | 3770 | blk_queue_max_segment_size(q, segment_size); |
3765 | blk_queue_io_min(q, segment_size); | 3771 | blk_queue_io_min(q, segment_size); |
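The rbd queue-limit line works because max_hw_sectors and max_sectors are separate limits: blk_queue_max_hw_sectors() raises the hardware ceiling but leaves the soft per-request limit at the block layer's smaller default, so requests would still be split well below the object size unless the soft limit is lifted to match. A short sketch of the pattern (q and segment_size as in the hunk above):

    blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
    /* the soft limit (limits.max_sectors) stays at the default unless raised */
    q->limits.max_sectors = queue_max_hw_sectors(q);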
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index e93899cc6f60..6ca35495a5be 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq) | |||
144 | do { | 144 | do { |
145 | virtqueue_disable_cb(vq); | 145 | virtqueue_disable_cb(vq); |
146 | while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { | 146 | while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { |
147 | blk_mq_complete_request(vbr->req); | 147 | blk_mq_complete_request(vbr->req, vbr->req->errors); |
148 | req_done = true; | 148 | req_done = true; |
149 | } | 149 | } |
150 | if (unlikely(virtqueue_is_broken(vq))) | 150 | if (unlikely(virtqueue_is_broken(vq))) |
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index deb3f001791f..767657565de6 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
@@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref, | |||
212 | 212 | ||
213 | static int xen_blkif_disconnect(struct xen_blkif *blkif) | 213 | static int xen_blkif_disconnect(struct xen_blkif *blkif) |
214 | { | 214 | { |
215 | struct pending_req *req, *n; | ||
216 | int i = 0, j; | ||
217 | |||
215 | if (blkif->xenblkd) { | 218 | if (blkif->xenblkd) { |
216 | kthread_stop(blkif->xenblkd); | 219 | kthread_stop(blkif->xenblkd); |
217 | wake_up(&blkif->shutdown_wq); | 220 | wake_up(&blkif->shutdown_wq); |
@@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) | |||
238 | /* Remove all persistent grants and the cache of ballooned pages. */ | 241 | /* Remove all persistent grants and the cache of ballooned pages. */ |
239 | xen_blkbk_free_caches(blkif); | 242 | xen_blkbk_free_caches(blkif); |
240 | 243 | ||
244 | /* Check that there is no request in use */ | ||
245 | list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { | ||
246 | list_del(&req->free_list); | ||
247 | |||
248 | for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) | ||
249 | kfree(req->segments[j]); | ||
250 | |||
251 | for (j = 0; j < MAX_INDIRECT_PAGES; j++) | ||
252 | kfree(req->indirect_pages[j]); | ||
253 | |||
254 | kfree(req); | ||
255 | i++; | ||
256 | } | ||
257 | |||
258 | WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); | ||
259 | blkif->nr_ring_pages = 0; | ||
260 | |||
241 | return 0; | 261 | return 0; |
242 | } | 262 | } |
243 | 263 | ||
244 | static void xen_blkif_free(struct xen_blkif *blkif) | 264 | static void xen_blkif_free(struct xen_blkif *blkif) |
245 | { | 265 | { |
246 | struct pending_req *req, *n; | ||
247 | int i = 0, j; | ||
248 | 266 | ||
249 | xen_blkif_disconnect(blkif); | 267 | xen_blkif_disconnect(blkif); |
250 | xen_vbd_free(&blkif->vbd); | 268 | xen_vbd_free(&blkif->vbd); |
@@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif) | |||
257 | BUG_ON(!list_empty(&blkif->free_pages)); | 275 | BUG_ON(!list_empty(&blkif->free_pages)); |
258 | BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); | 276 | BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); |
259 | 277 | ||
260 | /* Check that there is no request in use */ | ||
261 | list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { | ||
262 | list_del(&req->free_list); | ||
263 | |||
264 | for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) | ||
265 | kfree(req->segments[j]); | ||
266 | |||
267 | for (j = 0; j < MAX_INDIRECT_PAGES; j++) | ||
268 | kfree(req->indirect_pages[j]); | ||
269 | |||
270 | kfree(req); | ||
271 | i++; | ||
272 | } | ||
273 | |||
274 | WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); | ||
275 | |||
276 | kmem_cache_free(xen_blkif_cachep, blkif); | 278 | kmem_cache_free(xen_blkif_cachep, blkif); |
277 | } | 279 | } |
278 | 280 | ||
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 0823a96902f8..611170896b8c 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
1142 | RING_IDX i, rp; | 1142 | RING_IDX i, rp; |
1143 | unsigned long flags; | 1143 | unsigned long flags; |
1144 | struct blkfront_info *info = (struct blkfront_info *)dev_id; | 1144 | struct blkfront_info *info = (struct blkfront_info *)dev_id; |
1145 | int error; | ||
1145 | 1146 | ||
1146 | spin_lock_irqsave(&info->io_lock, flags); | 1147 | spin_lock_irqsave(&info->io_lock, flags); |
1147 | 1148 | ||
@@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
1182 | continue; | 1183 | continue; |
1183 | } | 1184 | } |
1184 | 1185 | ||
1185 | req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; | 1186 | error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; |
1186 | switch (bret->operation) { | 1187 | switch (bret->operation) { |
1187 | case BLKIF_OP_DISCARD: | 1188 | case BLKIF_OP_DISCARD: |
1188 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { | 1189 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { |
1189 | struct request_queue *rq = info->rq; | 1190 | struct request_queue *rq = info->rq; |
1190 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", | 1191 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", |
1191 | info->gd->disk_name, op_name(bret->operation)); | 1192 | info->gd->disk_name, op_name(bret->operation)); |
1192 | req->errors = -EOPNOTSUPP; | 1193 | error = -EOPNOTSUPP; |
1193 | info->feature_discard = 0; | 1194 | info->feature_discard = 0; |
1194 | info->feature_secdiscard = 0; | 1195 | info->feature_secdiscard = 0; |
1195 | queue_flag_clear(QUEUE_FLAG_DISCARD, rq); | 1196 | queue_flag_clear(QUEUE_FLAG_DISCARD, rq); |
1196 | queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); | 1197 | queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); |
1197 | } | 1198 | } |
1198 | blk_mq_complete_request(req); | 1199 | blk_mq_complete_request(req, error); |
1199 | break; | 1200 | break; |
1200 | case BLKIF_OP_FLUSH_DISKCACHE: | 1201 | case BLKIF_OP_FLUSH_DISKCACHE: |
1201 | case BLKIF_OP_WRITE_BARRIER: | 1202 | case BLKIF_OP_WRITE_BARRIER: |
1202 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { | 1203 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { |
1203 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", | 1204 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", |
1204 | info->gd->disk_name, op_name(bret->operation)); | 1205 | info->gd->disk_name, op_name(bret->operation)); |
1205 | req->errors = -EOPNOTSUPP; | 1206 | error = -EOPNOTSUPP; |
1206 | } | 1207 | } |
1207 | if (unlikely(bret->status == BLKIF_RSP_ERROR && | 1208 | if (unlikely(bret->status == BLKIF_RSP_ERROR && |
1208 | info->shadow[id].req.u.rw.nr_segments == 0)) { | 1209 | info->shadow[id].req.u.rw.nr_segments == 0)) { |
1209 | printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", | 1210 | printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", |
1210 | info->gd->disk_name, op_name(bret->operation)); | 1211 | info->gd->disk_name, op_name(bret->operation)); |
1211 | req->errors = -EOPNOTSUPP; | 1212 | error = -EOPNOTSUPP; |
1212 | } | 1213 | } |
1213 | if (unlikely(req->errors)) { | 1214 | if (unlikely(error)) { |
1214 | if (req->errors == -EOPNOTSUPP) | 1215 | if (error == -EOPNOTSUPP) |
1215 | req->errors = 0; | 1216 | error = 0; |
1216 | info->feature_flush = 0; | 1217 | info->feature_flush = 0; |
1217 | xlvbd_flush(info); | 1218 | xlvbd_flush(info); |
1218 | } | 1219 | } |
@@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
1223 | dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " | 1224 | dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " |
1224 | "request: %x\n", bret->status); | 1225 | "request: %x\n", bret->status); |
1225 | 1226 | ||
1226 | blk_mq_complete_request(req); | 1227 | blk_mq_complete_request(req, error); |
1227 | break; | 1228 | break; |
1228 | default: | 1229 | default: |
1229 | BUG(); | 1230 | BUG(); |
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 965d1afb0eaa..5cb13ca3a3ac 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c | |||
@@ -330,12 +330,14 @@ void zcomp_destroy(struct zcomp *comp) | |||
330 | * allocate new zcomp and initialize it. return compressing | 330 | * allocate new zcomp and initialize it. return compressing |
331 | * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL) | 331 | * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL) |
332 | * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in | 332 | * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in |
333 | * case of allocation error. | 333 | * case of allocation error, or any other error potentially |
334 | * returned by functions zcomp_strm_{multi,single}_create. | ||
334 | */ | 335 | */ |
335 | struct zcomp *zcomp_create(const char *compress, int max_strm) | 336 | struct zcomp *zcomp_create(const char *compress, int max_strm) |
336 | { | 337 | { |
337 | struct zcomp *comp; | 338 | struct zcomp *comp; |
338 | struct zcomp_backend *backend; | 339 | struct zcomp_backend *backend; |
340 | int error; | ||
339 | 341 | ||
340 | backend = find_backend(compress); | 342 | backend = find_backend(compress); |
341 | if (!backend) | 343 | if (!backend) |
@@ -347,12 +349,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm) | |||
347 | 349 | ||
348 | comp->backend = backend; | 350 | comp->backend = backend; |
349 | if (max_strm > 1) | 351 | if (max_strm > 1) |
350 | zcomp_strm_multi_create(comp, max_strm); | 352 | error = zcomp_strm_multi_create(comp, max_strm); |
351 | else | 353 | else |
352 | zcomp_strm_single_create(comp); | 354 | error = zcomp_strm_single_create(comp); |
353 | if (!comp->stream) { | 355 | if (error) { |
354 | kfree(comp); | 356 | kfree(comp); |
355 | return ERR_PTR(-ENOMEM); | 357 | return ERR_PTR(error); |
356 | } | 358 | } |
357 | return comp; | 359 | return comp; |
358 | } | 360 | } |
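With zcomp_create() now propagating the real error via ERR_PTR() instead of collapsing everything to -ENOMEM, callers can report and return the actual cause. A minimal consumer sketch (the algorithm name and stream count are illustrative):

    int max_strm = 4;
    struct zcomp *comp;

    comp = zcomp_create("lzo", max_strm);
    if (IS_ERR(comp)) {
            pr_err("zcomp: initialization failed (%ld)\n", PTR_ERR(comp));
            return PTR_ERR(comp);
    }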
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 1a82f3a17681..0ebca8ba7bc4 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig | |||
@@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL | |||
36 | 36 | ||
37 | config ARM_CCI500_PMU | 37 | config ARM_CCI500_PMU |
38 | bool "ARM CCI500 PMU support" | 38 | bool "ARM CCI500 PMU support" |
39 | default y | ||
40 | depends on (ARM && CPU_V7) || ARM64 | 39 | depends on (ARM && CPU_V7) || ARM64 |
41 | depends on PERF_EVENTS | 40 | depends on PERF_EVENTS |
42 | select ARM_CCI_PMU | 41 | select ARM_CCI_PMU |
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c index c37cf754a985..3c77645405e5 100644 --- a/drivers/char/hw_random/xgene-rng.c +++ b/drivers/char/hw_random/xgene-rng.c | |||
@@ -344,11 +344,12 @@ static int xgene_rng_probe(struct platform_device *pdev) | |||
344 | if (IS_ERR(ctx->csr_base)) | 344 | if (IS_ERR(ctx->csr_base)) |
345 | return PTR_ERR(ctx->csr_base); | 345 | return PTR_ERR(ctx->csr_base); |
346 | 346 | ||
347 | ctx->irq = platform_get_irq(pdev, 0); | 347 | rc = platform_get_irq(pdev, 0); |
348 | if (ctx->irq < 0) { | 348 | if (rc < 0) { |
349 | dev_err(&pdev->dev, "No IRQ resource\n"); | 349 | dev_err(&pdev->dev, "No IRQ resource\n"); |
350 | return ctx->irq; | 350 | return rc; |
351 | } | 351 | } |
352 | ctx->irq = rc; | ||
352 | 353 | ||
353 | dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d", | 354 | dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d", |
354 | ctx->csr_base, ctx->irq); | 355 | ctx->csr_base, ctx->irq); |
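The xgene-rng fix matters when the context stores its IRQ in an unsigned field: a negative errno written there can never test as < 0, so the platform_get_irq() result is kept in a signed local and only assigned once it is known to be valid. A small illustration, assuming a u32 field (the struct and function names are hypothetical):

    struct myctx {
            u32 irq;        /* unsigned: cannot represent a negative errno */
    };

    static int my_get_irq(struct platform_device *pdev, struct myctx *ctx)
    {
            int rc = platform_get_irq(pdev, 0);     /* negative errno on failure */

            if (rc < 0)
                    return rc;      /* test the signed value before it is truncated */
            ctx->irq = rc;
            return 0;
    }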
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 43e2c3ad6c31..0ebcf449778a 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -2437,7 +2437,8 @@ static int __clk_init(struct device *dev, struct clk *clk_user) | |||
2437 | hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { | 2437 | hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { |
2438 | if (orphan->num_parents && orphan->ops->get_parent) { | 2438 | if (orphan->num_parents && orphan->ops->get_parent) { |
2439 | i = orphan->ops->get_parent(orphan->hw); | 2439 | i = orphan->ops->get_parent(orphan->hw); |
2440 | if (!strcmp(core->name, orphan->parent_names[i])) | 2440 | if (i >= 0 && i < orphan->num_parents && |
2441 | !strcmp(core->name, orphan->parent_names[i])) | ||
2441 | clk_core_reparent(orphan, core); | 2442 | clk_core_reparent(orphan, core); |
2442 | continue; | 2443 | continue; |
2443 | } | 2444 | } |
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c index 2a38eb4a2552..6cf38dc1c929 100644 --- a/drivers/clk/h8300/clk-h8s2678.c +++ b/drivers/clk/h8300/clk-h8s2678.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/err.h> | 8 | #include <linux/err.h> |
9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
10 | #include <linux/of_address.h> | 10 | #include <linux/of_address.h> |
11 | #include <linux/slab.h> | ||
11 | 12 | ||
12 | static DEFINE_SPINLOCK(clklock); | 13 | static DEFINE_SPINLOCK(clklock); |
13 | 14 | ||
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig index 2c16807341dc..e43485448612 100644 --- a/drivers/clk/hisilicon/Kconfig +++ b/drivers/clk/hisilicon/Kconfig | |||
@@ -1,6 +1,12 @@ | |||
1 | config COMMON_CLK_HI6220 | 1 | config COMMON_CLK_HI6220 |
2 | bool "Hi6220 Clock Driver" | 2 | bool "Hi6220 Clock Driver" |
3 | depends on (ARCH_HISI || COMPILE_TEST) && MAILBOX | 3 | depends on ARCH_HISI || COMPILE_TEST |
4 | default ARCH_HISI | 4 | default ARCH_HISI |
5 | help | 5 | help |
6 | Build the Hisilicon Hi6220 clock driver based on the common clock framework. | 6 | Build the Hisilicon Hi6220 clock driver based on the common clock framework. |
7 | |||
8 | config STUB_CLK_HI6220 | ||
9 | bool "Hi6220 Stub Clock Driver" | ||
10 | depends on COMMON_CLK_HI6220 && MAILBOX | ||
11 | help | ||
12 | Build the Hisilicon Hi6220 stub clock driver. | ||
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile index 4a1001a11f04..74dba31590f9 100644 --- a/drivers/clk/hisilicon/Makefile +++ b/drivers/clk/hisilicon/Makefile | |||
@@ -7,4 +7,5 @@ obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o | |||
7 | obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o | 7 | obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o |
8 | obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o | 8 | obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o |
9 | obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o | 9 | obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o |
10 | obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o clk-hi6220-stub.o | 10 | obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o |
11 | obj-$(CONFIG_STUB_CLK_HI6220) += clk-hi6220-stub.o | ||
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c index 5837eb8a212f..85da8b983256 100644 --- a/drivers/clk/mvebu/clk-cpu.c +++ b/drivers/clk/mvebu/clk-cpu.c | |||
@@ -197,6 +197,7 @@ static void __init of_cpu_clk_setup(struct device_node *node) | |||
197 | for_each_node_by_type(dn, "cpu") { | 197 | for_each_node_by_type(dn, "cpu") { |
198 | struct clk_init_data init; | 198 | struct clk_init_data init; |
199 | struct clk *clk; | 199 | struct clk *clk; |
200 | struct clk *parent_clk; | ||
200 | char *clk_name = kzalloc(5, GFP_KERNEL); | 201 | char *clk_name = kzalloc(5, GFP_KERNEL); |
201 | int cpu, err; | 202 | int cpu, err; |
202 | 203 | ||
@@ -208,8 +209,9 @@ static void __init of_cpu_clk_setup(struct device_node *node) | |||
208 | goto bail_out; | 209 | goto bail_out; |
209 | 210 | ||
210 | sprintf(clk_name, "cpu%d", cpu); | 211 | sprintf(clk_name, "cpu%d", cpu); |
212 | parent_clk = of_clk_get(node, 0); | ||
211 | 213 | ||
212 | cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0); | 214 | cpuclk[cpu].parent_name = __clk_get_name(parent_clk); |
213 | cpuclk[cpu].clk_name = clk_name; | 215 | cpuclk[cpu].clk_name = clk_name; |
214 | cpuclk[cpu].cpu = cpu; | 216 | cpuclk[cpu].cpu = cpu; |
215 | cpuclk[cpu].reg_base = clock_complex_base; | 217 | cpuclk[cpu].reg_base = clock_complex_base; |
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c index ed02bbc7b11f..abb47608713b 100644 --- a/drivers/clk/rockchip/clk-rk3188.c +++ b/drivers/clk/rockchip/clk-rk3188.c | |||
@@ -716,6 +716,8 @@ static const char *const rk3188_critical_clocks[] __initconst = { | |||
716 | "aclk_cpu", | 716 | "aclk_cpu", |
717 | "aclk_peri", | 717 | "aclk_peri", |
718 | "hclk_peri", | 718 | "hclk_peri", |
719 | "pclk_cpu", | ||
720 | "pclk_peri", | ||
719 | }; | 721 | }; |
720 | 722 | ||
721 | static void __init rk3188_common_clk_init(struct device_node *np) | 723 | static void __init rk3188_common_clk_init(struct device_node *np) |
@@ -744,8 +746,6 @@ static void __init rk3188_common_clk_init(struct device_node *np) | |||
744 | 746 | ||
745 | rockchip_clk_register_branches(common_clk_branches, | 747 | rockchip_clk_register_branches(common_clk_branches, |
746 | ARRAY_SIZE(common_clk_branches)); | 748 | ARRAY_SIZE(common_clk_branches)); |
747 | rockchip_clk_protect_critical(rk3188_critical_clocks, | ||
748 | ARRAY_SIZE(rk3188_critical_clocks)); | ||
749 | 749 | ||
750 | rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0), | 750 | rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0), |
751 | ROCKCHIP_SOFTRST_HIWORD_MASK); | 751 | ROCKCHIP_SOFTRST_HIWORD_MASK); |
@@ -765,6 +765,8 @@ static void __init rk3066a_clk_init(struct device_node *np) | |||
765 | mux_armclk_p, ARRAY_SIZE(mux_armclk_p), | 765 | mux_armclk_p, ARRAY_SIZE(mux_armclk_p), |
766 | &rk3066_cpuclk_data, rk3066_cpuclk_rates, | 766 | &rk3066_cpuclk_data, rk3066_cpuclk_rates, |
767 | ARRAY_SIZE(rk3066_cpuclk_rates)); | 767 | ARRAY_SIZE(rk3066_cpuclk_rates)); |
768 | rockchip_clk_protect_critical(rk3188_critical_clocks, | ||
769 | ARRAY_SIZE(rk3188_critical_clocks)); | ||
768 | } | 770 | } |
769 | CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init); | 771 | CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init); |
770 | 772 | ||
@@ -801,6 +803,9 @@ static void __init rk3188a_clk_init(struct device_node *np) | |||
801 | pr_warn("%s: missing clocks to reparent aclk_cpu_pre to gpll\n", | 803 | pr_warn("%s: missing clocks to reparent aclk_cpu_pre to gpll\n", |
802 | __func__); | 804 | __func__); |
803 | } | 805 | } |
806 | |||
807 | rockchip_clk_protect_critical(rk3188_critical_clocks, | ||
808 | ARRAY_SIZE(rk3188_critical_clocks)); | ||
804 | } | 809 | } |
805 | CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init); | 810 | CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init); |
806 | 811 | ||
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c index 9c5d61e698ef..7e6b783e6eee 100644 --- a/drivers/clk/rockchip/clk-rk3368.c +++ b/drivers/clk/rockchip/clk-rk3368.c | |||
@@ -818,6 +818,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = { | |||
818 | GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS), | 818 | GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS), |
819 | }; | 819 | }; |
820 | 820 | ||
821 | static const char *const rk3368_critical_clocks[] __initconst = { | ||
822 | "pclk_pd_pmu", | ||
823 | }; | ||
824 | |||
821 | static void __init rk3368_clk_init(struct device_node *np) | 825 | static void __init rk3368_clk_init(struct device_node *np) |
822 | { | 826 | { |
823 | void __iomem *reg_base; | 827 | void __iomem *reg_base; |
@@ -862,6 +866,8 @@ static void __init rk3368_clk_init(struct device_node *np) | |||
862 | RK3368_GRF_SOC_STATUS0); | 866 | RK3368_GRF_SOC_STATUS0); |
863 | rockchip_clk_register_branches(rk3368_clk_branches, | 867 | rockchip_clk_register_branches(rk3368_clk_branches, |
864 | ARRAY_SIZE(rk3368_clk_branches)); | 868 | ARRAY_SIZE(rk3368_clk_branches)); |
869 | rockchip_clk_protect_critical(rk3368_critical_clocks, | ||
870 | ARRAY_SIZE(rk3368_critical_clocks)); | ||
865 | 871 | ||
866 | rockchip_clk_register_armclk(ARMCLKB, "armclkb", | 872 | rockchip_clk_register_armclk(ARMCLKB, "armclkb", |
867 | mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p), | 873 | mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p), |
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c index 7c1e1f58e2da..2fe37f708dc7 100644 --- a/drivers/clk/samsung/clk-cpu.c +++ b/drivers/clk/samsung/clk-cpu.c | |||
@@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, | |||
164 | * the values for DIV_COPY and DIV_HPM dividers need not be set. | 164 | * the values for DIV_COPY and DIV_HPM dividers need not be set. |
165 | */ | 165 | */ |
166 | div0 = cfg_data->div0; | 166 | div0 = cfg_data->div0; |
167 | if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { | 167 | if (cpuclk->flags & CLK_CPU_HAS_DIV1) { |
168 | div1 = cfg_data->div1; | 168 | div1 = cfg_data->div1; |
169 | if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK) | 169 | if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK) |
170 | div1 = readl(base + E4210_DIV_CPU1) & | 170 | div1 = readl(base + E4210_DIV_CPU1) & |
@@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, | |||
185 | alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1; | 185 | alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1; |
186 | WARN_ON(alt_div >= MAX_DIV); | 186 | WARN_ON(alt_div >= MAX_DIV); |
187 | 187 | ||
188 | if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { | 188 | if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { |
189 | /* | 189 | /* |
190 | * In Exynos4210, ATB clock parent is also mout_core. So | 190 | * In Exynos4210, ATB clock parent is also mout_core. So |
191 | * ATB clock also needs to be maintained at safe speed. | 191 | * ATB clock also needs to be maintained at safe speed. |
@@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, | |||
206 | writel(div0, base + E4210_DIV_CPU0); | 206 | writel(div0, base + E4210_DIV_CPU0); |
207 | wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL); | 207 | wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL); |
208 | 208 | ||
209 | if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { | 209 | if (cpuclk->flags & CLK_CPU_HAS_DIV1) { |
210 | writel(div1, base + E4210_DIV_CPU1); | 210 | writel(div1, base + E4210_DIV_CPU1); |
211 | wait_until_divider_stable(base + E4210_DIV_STAT_CPU1, | 211 | wait_until_divider_stable(base + E4210_DIV_STAT_CPU1, |
212 | DIV_MASK_ALL); | 212 | DIV_MASK_ALL); |
@@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, | |||
225 | unsigned long mux_reg; | 225 | unsigned long mux_reg; |
226 | 226 | ||
227 | /* find out the divider values to use for clock data */ | 227 | /* find out the divider values to use for clock data */ |
228 | if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { | 228 | if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { |
229 | while ((cfg_data->prate * 1000) != ndata->new_rate) { | 229 | while ((cfg_data->prate * 1000) != ndata->new_rate) { |
230 | if (cfg_data->prate == 0) | 230 | if (cfg_data->prate == 0) |
231 | return -EINVAL; | 231 | return -EINVAL; |
@@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, | |||
240 | writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU); | 240 | writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU); |
241 | wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1); | 241 | wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1); |
242 | 242 | ||
243 | if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { | 243 | if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { |
244 | div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK); | 244 | div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK); |
245 | div_mask |= E4210_DIV0_ATB_MASK; | 245 | div_mask |= E4210_DIV0_ATB_MASK; |
246 | } | 246 | } |
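The samsung clk-cpu change is about flag semantics: the CLK_CPU_* flags are bit masks, while test_bit() interprets its first argument as a bit number, so the old code probed the wrong bit. A hedged illustration, assuming mask-style definitions along these lines:

    #define CLK_CPU_HAS_DIV1                BIT(0)  /* mask 0x1 */
    #define CLK_CPU_NEEDS_DEBUG_ALT_DIV     BIT(1)  /* mask 0x2 */

    unsigned long flags = CLK_CPU_NEEDS_DEBUG_ALT_DIV;     /* value 0x2 */

    /* test_bit(0x2, &flags) probes bit number 2 (mask 0x4): false, wrong   */
    /* flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV probes mask 0x2:  true, correct  */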
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index 83ccf142ff2a..576cd0354d48 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c | |||
@@ -307,7 +307,7 @@ static const struct clkgen_quadfs_data st_fs660c32_F_416 = { | |||
307 | .get_rate = clk_fs660c32_dig_get_rate, | 307 | .get_rate = clk_fs660c32_dig_get_rate, |
308 | }; | 308 | }; |
309 | 309 | ||
310 | static const struct clkgen_quadfs_data st_fs660c32_C_407 = { | 310 | static const struct clkgen_quadfs_data st_fs660c32_C = { |
311 | .nrst_present = true, | 311 | .nrst_present = true, |
312 | .nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0), | 312 | .nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0), |
313 | CLKGEN_FIELD(0x2f0, 0x1, 1), | 313 | CLKGEN_FIELD(0x2f0, 0x1, 1), |
@@ -350,7 +350,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = { | |||
350 | .get_rate = clk_fs660c32_dig_get_rate, | 350 | .get_rate = clk_fs660c32_dig_get_rate, |
351 | }; | 351 | }; |
352 | 352 | ||
353 | static const struct clkgen_quadfs_data st_fs660c32_D_407 = { | 353 | static const struct clkgen_quadfs_data st_fs660c32_D = { |
354 | .nrst_present = true, | 354 | .nrst_present = true, |
355 | .nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0), | 355 | .nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0), |
356 | CLKGEN_FIELD(0x2a0, 0x1, 1), | 356 | CLKGEN_FIELD(0x2a0, 0x1, 1), |
@@ -1077,11 +1077,11 @@ static const struct of_device_id quadfs_of_match[] = { | |||
1077 | }, | 1077 | }, |
1078 | { | 1078 | { |
1079 | .compatible = "st,stih407-quadfs660-C", | 1079 | .compatible = "st,stih407-quadfs660-C", |
1080 | .data = &st_fs660c32_C_407 | 1080 | .data = &st_fs660c32_C |
1081 | }, | 1081 | }, |
1082 | { | 1082 | { |
1083 | .compatible = "st,stih407-quadfs660-D", | 1083 | .compatible = "st,stih407-quadfs660-D", |
1084 | .data = &st_fs660c32_D_407 | 1084 | .data = &st_fs660c32_D |
1085 | }, | 1085 | }, |
1086 | {} | 1086 | {} |
1087 | }; | 1087 | }; |
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index 47a38a994cac..b2a332cf8985 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c | |||
@@ -193,7 +193,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_a0 = { | |||
193 | .ops = &stm_pll3200c32_ops, | 193 | .ops = &stm_pll3200c32_ops, |
194 | }; | 194 | }; |
195 | 195 | ||
196 | static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = { | 196 | static const struct clkgen_pll_data st_pll3200c32_cx_0 = { |
197 | /* 407 C0 PLL0 */ | 197 | /* 407 C0 PLL0 */ |
198 | .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8), | 198 | .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8), |
199 | .locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24), | 199 | .locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24), |
@@ -205,7 +205,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = { | |||
205 | .ops = &stm_pll3200c32_ops, | 205 | .ops = &stm_pll3200c32_ops, |
206 | }; | 206 | }; |
207 | 207 | ||
208 | static const struct clkgen_pll_data st_pll3200c32_407_c0_1 = { | 208 | static const struct clkgen_pll_data st_pll3200c32_cx_1 = { |
209 | /* 407 C0 PLL1 */ | 209 | /* 407 C0 PLL1 */ |
210 | .pdn_status = CLKGEN_FIELD(0x2c8, 0x1, 8), | 210 | .pdn_status = CLKGEN_FIELD(0x2c8, 0x1, 8), |
211 | .locked_status = CLKGEN_FIELD(0x2c8, 0x1, 24), | 211 | .locked_status = CLKGEN_FIELD(0x2c8, 0x1, 24), |
@@ -624,12 +624,12 @@ static const struct of_device_id c32_pll_of_match[] = { | |||
624 | .data = &st_pll3200c32_407_a0, | 624 | .data = &st_pll3200c32_407_a0, |
625 | }, | 625 | }, |
626 | { | 626 | { |
627 | .compatible = "st,stih407-plls-c32-c0_0", | 627 | .compatible = "st,plls-c32-cx_0", |
628 | .data = &st_pll3200c32_407_c0_0, | 628 | .data = &st_pll3200c32_cx_0, |
629 | }, | 629 | }, |
630 | { | 630 | { |
631 | .compatible = "st,stih407-plls-c32-c0_1", | 631 | .compatible = "st,plls-c32-cx_1", |
632 | .data = &st_pll3200c32_407_c0_1, | 632 | .data = &st_pll3200c32_cx_1, |
633 | }, | 633 | }, |
634 | { | 634 | { |
635 | .compatible = "st,stih407-plls-c32-a9", | 635 | .compatible = "st,stih407-plls-c32-a9", |
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c index c2ff859ee0e8..c4e3a52e225b 100644 --- a/drivers/clk/tegra/clk-dfll.c +++ b/drivers/clk/tegra/clk-dfll.c | |||
@@ -682,11 +682,17 @@ static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate) | |||
682 | struct dev_pm_opp *opp; | 682 | struct dev_pm_opp *opp; |
683 | int i, uv; | 683 | int i, uv; |
684 | 684 | ||
685 | rcu_read_lock(); | ||
686 | |||
685 | opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate); | 687 | opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate); |
686 | if (IS_ERR(opp)) | 688 | if (IS_ERR(opp)) { |
689 | rcu_read_unlock(); | ||
687 | return PTR_ERR(opp); | 690 | return PTR_ERR(opp); |
691 | } | ||
688 | uv = dev_pm_opp_get_voltage(opp); | 692 | uv = dev_pm_opp_get_voltage(opp); |
689 | 693 | ||
694 | rcu_read_unlock(); | ||
695 | |||
690 | for (i = 0; i < td->i2c_lut_size; i++) { | 696 | for (i = 0; i < td->i2c_lut_size; i++) { |
691 | if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv) | 697 | if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv) |
692 | return i; | 698 | return i; |
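The tegra-dfll hunk follows the OPP library's locking rule of this era: the OPP tables are RCU-protected, so dev_pm_opp_find_freq_ceil() must run under rcu_read_lock(), and the returned opp (including dev_pm_opp_get_voltage() on it) is only valid until the matching unlock. The pattern in isolation:

    rcu_read_lock();
    opp = dev_pm_opp_find_freq_ceil(dev, &rate);
    if (IS_ERR(opp)) {
            rcu_read_unlock();
            return PTR_ERR(opp);
    }
    uv = dev_pm_opp_get_voltage(opp);
    rcu_read_unlock();      /* opp must not be used past this point */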
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c index 676ee8f6d813..8831e1a05367 100644 --- a/drivers/clk/ti/clk-3xxx.c +++ b/drivers/clk/ti/clk-3xxx.c | |||
@@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = { | |||
374 | DT_CLK(NULL, "gpio2_ick", "gpio2_ick"), | 374 | DT_CLK(NULL, "gpio2_ick", "gpio2_ick"), |
375 | DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), | 375 | DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), |
376 | DT_CLK(NULL, "uart3_ick", "uart3_ick"), | 376 | DT_CLK(NULL, "uart3_ick", "uart3_ick"), |
377 | DT_CLK(NULL, "uart4_ick", "uart4_ick"), | ||
378 | DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), | 377 | DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), |
379 | DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), | 378 | DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), |
380 | DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), | 379 | DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), |
@@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = { | |||
519 | static struct ti_dt_clk omap36xx_clks[] = { | 518 | static struct ti_dt_clk omap36xx_clks[] = { |
520 | DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"), | 519 | DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"), |
521 | DT_CLK(NULL, "uart4_fck", "uart4_fck"), | 520 | DT_CLK(NULL, "uart4_fck", "uart4_fck"), |
521 | DT_CLK(NULL, "uart4_ick", "uart4_ick"), | ||
522 | { .node_name = NULL }, | 522 | { .node_name = NULL }, |
523 | }; | 523 | }; |
524 | 524 | ||
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 9b5b289e6334..a911d7de3377 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c | |||
@@ -18,7 +18,6 @@ | |||
18 | 18 | ||
19 | #include "clock.h" | 19 | #include "clock.h" |
20 | 20 | ||
21 | #define DRA7_DPLL_ABE_DEFFREQ 180633600 | ||
22 | #define DRA7_DPLL_GMAC_DEFFREQ 1000000000 | 21 | #define DRA7_DPLL_GMAC_DEFFREQ 1000000000 |
23 | #define DRA7_DPLL_USB_DEFFREQ 960000000 | 22 | #define DRA7_DPLL_USB_DEFFREQ 960000000 |
24 | 23 | ||
@@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = { | |||
313 | int __init dra7xx_dt_clk_init(void) | 312 | int __init dra7xx_dt_clk_init(void) |
314 | { | 313 | { |
315 | int rc; | 314 | int rc; |
316 | struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck; | 315 | struct clk *dpll_ck, *hdcp_ck; |
317 | 316 | ||
318 | ti_dt_clocks_register(dra7xx_clks); | 317 | ti_dt_clocks_register(dra7xx_clks); |
319 | 318 | ||
320 | omap2_clk_disable_autoidle_all(); | 319 | omap2_clk_disable_autoidle_all(); |
321 | 320 | ||
322 | abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux"); | ||
323 | sys_clkin2 = clk_get_sys(NULL, "sys_clkin2"); | ||
324 | dpll_ck = clk_get_sys(NULL, "dpll_abe_ck"); | ||
325 | |||
326 | rc = clk_set_parent(abe_dpll_mux, sys_clkin2); | ||
327 | if (!rc) | ||
328 | rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ); | ||
329 | if (rc) | ||
330 | pr_err("%s: failed to configure ABE DPLL!\n", __func__); | ||
331 | |||
332 | dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck"); | ||
333 | rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2); | ||
334 | if (rc) | ||
335 | pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__); | ||
336 | |||
337 | dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck"); | 321 | dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck"); |
338 | rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ); | 322 | rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ); |
339 | if (rc) | 323 | if (rc) |
diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c index 90d7d8a21c49..1ddc288fce4e 100644 --- a/drivers/clk/ti/clkt_dflt.c +++ b/drivers/clk/ti/clkt_dflt.c | |||
@@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw) | |||
222 | } | 222 | } |
223 | } | 223 | } |
224 | 224 | ||
225 | if (unlikely(!clk->enable_reg)) { | 225 | if (unlikely(IS_ERR(clk->enable_reg))) { |
226 | pr_err("%s: %s missing enable_reg\n", __func__, | 226 | pr_err("%s: %s missing enable_reg\n", __func__, |
227 | clk_hw_get_name(hw)); | 227 | clk_hw_get_name(hw)); |
228 | ret = -EINVAL; | 228 | ret = -EINVAL; |
@@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw) | |||
264 | u32 v; | 264 | u32 v; |
265 | 265 | ||
266 | clk = to_clk_hw_omap(hw); | 266 | clk = to_clk_hw_omap(hw); |
267 | if (!clk->enable_reg) { | 267 | if (IS_ERR(clk->enable_reg)) { |
268 | /* | 268 | /* |
269 | * 'independent' here refers to a clock which is not | 269 | * 'independent' here refers to a clock which is not |
270 | * controlled by its parent. | 270 | * controlled by its parent. |
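The clkt_dflt fix reflects how enable_reg is populated in this driver: on a failed or absent register lookup the field ends up holding an ERR_PTR() rather than NULL, so a plain NULL test never catches that case while IS_ERR() does. In schematic form (hedged sketch, field as in the hunk):

    if (IS_ERR(clk->enable_reg))    /* a !clk->enable_reg test would miss an ERR_PTR */
            return -EINVAL;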
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c index bb2c2b050964..d3c1742ded1a 100644 --- a/drivers/clocksource/rockchip_timer.c +++ b/drivers/clocksource/rockchip_timer.c | |||
@@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np) | |||
148 | bc_timer.freq = clk_get_rate(timer_clk); | 148 | bc_timer.freq = clk_get_rate(timer_clk); |
149 | 149 | ||
150 | irq = irq_of_parse_and_map(np, 0); | 150 | irq = irq_of_parse_and_map(np, 0); |
151 | if (irq == NO_IRQ) { | 151 | if (!irq) { |
152 | pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); | 152 | pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); |
153 | return; | 153 | return; |
154 | } | 154 | } |
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c index edacf3902e10..1cea08cf603e 100644 --- a/drivers/clocksource/timer-keystone.c +++ b/drivers/clocksource/timer-keystone.c | |||
@@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np) | |||
152 | int irq, error; | 152 | int irq, error; |
153 | 153 | ||
154 | irq = irq_of_parse_and_map(np, 0); | 154 | irq = irq_of_parse_and_map(np, 0); |
155 | if (irq == NO_IRQ) { | 155 | if (!irq) { |
156 | pr_err("%s: failed to map interrupts\n", __func__); | 156 | pr_err("%s: failed to map interrupts\n", __func__); |
157 | return; | 157 | return; |
158 | } | 158 | } |
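Both timer hunks encode the same contract: irq_of_parse_and_map() returns an unsigned virq number and signals failure with 0, not with a NO_IRQ constant (which is not defined consistently across architectures). Stated once:

    unsigned int irq = irq_of_parse_and_map(np, 0);

    if (!irq) {     /* 0 means "no mapping"; do not compare against NO_IRQ */
            pr_err("failed to map interrupt\n");
            return;
    }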
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 15b921a9248c..cec1ee2d2f74 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c | |||
@@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) | |||
149 | { | 149 | { |
150 | struct acpi_cpufreq_data *data = policy->driver_data; | 150 | struct acpi_cpufreq_data *data = policy->driver_data; |
151 | 151 | ||
152 | if (unlikely(!data)) | ||
153 | return -ENODEV; | ||
154 | |||
152 | return cpufreq_show_cpus(data->freqdomain_cpus, buf); | 155 | return cpufreq_show_cpus(data->freqdomain_cpus, buf); |
153 | } | 156 | } |
154 | 157 | ||
@@ -375,12 +378,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | |||
375 | 378 | ||
376 | pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); | 379 | pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); |
377 | 380 | ||
378 | policy = cpufreq_cpu_get(cpu); | 381 | policy = cpufreq_cpu_get_raw(cpu); |
379 | if (unlikely(!policy)) | 382 | if (unlikely(!policy)) |
380 | return 0; | 383 | return 0; |
381 | 384 | ||
382 | data = policy->driver_data; | 385 | data = policy->driver_data; |
383 | cpufreq_cpu_put(policy); | ||
384 | if (unlikely(!data || !data->freq_table)) | 386 | if (unlikely(!data || !data->freq_table)) |
385 | return 0; | 387 | return 0; |
386 | 388 | ||
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 6633b3fa996e..25c4c15103a0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -238,13 +238,13 @@ int cpufreq_generic_init(struct cpufreq_policy *policy, | |||
238 | } | 238 | } |
239 | EXPORT_SYMBOL_GPL(cpufreq_generic_init); | 239 | EXPORT_SYMBOL_GPL(cpufreq_generic_init); |
240 | 240 | ||
241 | /* Only for cpufreq core internal use */ | 241 | struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) |
242 | static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) | ||
243 | { | 242 | { |
244 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); | 243 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); |
245 | 244 | ||
246 | return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; | 245 | return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; |
247 | } | 246 | } |
247 | EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw); | ||
248 | 248 | ||
249 | unsigned int cpufreq_generic_get(unsigned int cpu) | 249 | unsigned int cpufreq_generic_get(unsigned int cpu) |
250 | { | 250 | { |
@@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu) | |||
1436 | * since this is a core component, and is essential for the | 1436 | * since this is a core component, and is essential for the |
1437 | * subsequent light-weight ->init() to succeed. | 1437 | * subsequent light-weight ->init() to succeed. |
1438 | */ | 1438 | */ |
1439 | if (cpufreq_driver->exit) | 1439 | if (cpufreq_driver->exit) { |
1440 | cpufreq_driver->exit(policy); | 1440 | cpufreq_driver->exit(policy); |
1441 | policy->freq_table = NULL; | ||
1442 | } | ||
1441 | } | 1443 | } |
1442 | 1444 | ||
1443 | /** | 1445 | /** |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3af9dd7332e6..aa33b92b3e3e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -776,6 +776,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu) | |||
776 | local_irq_save(flags); | 776 | local_irq_save(flags); |
777 | rdmsrl(MSR_IA32_APERF, aperf); | 777 | rdmsrl(MSR_IA32_APERF, aperf); |
778 | rdmsrl(MSR_IA32_MPERF, mperf); | 778 | rdmsrl(MSR_IA32_MPERF, mperf); |
779 | if (cpu->prev_mperf == mperf) { | ||
780 | local_irq_restore(flags); | ||
781 | return; | ||
782 | } | ||
783 | |||
779 | tsc = rdtsc(); | 784 | tsc = rdtsc(); |
780 | local_irq_restore(flags); | 785 | local_irq_restore(flags); |
781 | 786 | ||
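The intel_pstate guard avoids a divide-by-zero: the sample is later turned into a busy ratio using the MPERF delta since the previous sample, so if MPERF has not advanced the sample is dropped and the previous one kept. Roughly (field names as in the driver; the ratio in the comment is a simplification, not the driver's exact formula):

    if (cpu->prev_mperf == mperf) {         /* no reference cycles elapsed */
            local_irq_restore(flags);
            return;                         /* keep the previous sample */
    }
    /* later: busy ~ aperf_delta / mperf_delta, which requires mperf_delta != 0 */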
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 07bc7aa6b224..d234719065a5 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -461,7 +461,7 @@ config CRYPTO_DEV_QCE | |||
461 | 461 | ||
462 | config CRYPTO_DEV_VMX | 462 | config CRYPTO_DEV_VMX |
463 | bool "Support for VMX cryptographic acceleration instructions" | 463 | bool "Support for VMX cryptographic acceleration instructions" |
464 | depends on PPC64 | 464 | depends on PPC64 && VSX |
465 | help | 465 | help |
466 | Support for VMX cryptographic acceleration instructions. | 466 | Support for VMX cryptographic acceleration instructions. |
467 | 467 | ||
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index b60698b30d30..bc2a55bc35e4 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h | |||
@@ -687,6 +687,33 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine) | |||
687 | 687 | ||
688 | int mv_cesa_queue_req(struct crypto_async_request *req); | 688 | int mv_cesa_queue_req(struct crypto_async_request *req); |
689 | 689 | ||
690 | /* | ||
691 | * Helper function that indicates whether a crypto request needs to be | ||
692 | * cleaned up or not after being enqueued using mv_cesa_queue_req(). | ||
693 | */ | ||
694 | static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req, | ||
695 | int ret) | ||
696 | { | ||
697 | /* | ||
698 | * The queue still had some space, the request was queued | ||
699 | * normally, so there's no need to clean it up. | ||
700 | */ | ||
701 | if (ret == -EINPROGRESS) | ||
702 | return false; | ||
703 | |||
704 | /* | ||
705 | * The queue had not space left, but since the request is | ||
706 | * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to | ||
707 | * the backlog and will be processed later. There's no need to | ||
708 | * clean it up. | ||
709 | */ | ||
710 | if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) | ||
711 | return false; | ||
712 | |||
713 | /* Request wasn't queued, we need to clean it up */ | ||
714 | return true; | ||
715 | } | ||
716 | |||
690 | /* TDMA functions */ | 717 | /* TDMA functions */ |
691 | 718 | ||
692 | static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter, | 719 | static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter, |
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index 0745cf3b9c0e..3df2f4e7adb2 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c | |||
@@ -189,7 +189,6 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, | |||
189 | { | 189 | { |
190 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); | 190 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); |
191 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); | 191 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); |
192 | |||
193 | creq->req.base.engine = engine; | 192 | creq->req.base.engine = engine; |
194 | 193 | ||
195 | if (creq->req.base.type == CESA_DMA_REQ) | 194 | if (creq->req.base.type == CESA_DMA_REQ) |
@@ -431,7 +430,7 @@ static int mv_cesa_des_op(struct ablkcipher_request *req, | |||
431 | return ret; | 430 | return ret; |
432 | 431 | ||
433 | ret = mv_cesa_queue_req(&req->base); | 432 | ret = mv_cesa_queue_req(&req->base); |
434 | if (ret && ret != -EINPROGRESS) | 433 | if (mv_cesa_req_needs_cleanup(&req->base, ret)) |
435 | mv_cesa_ablkcipher_cleanup(req); | 434 | mv_cesa_ablkcipher_cleanup(req); |
436 | 435 | ||
437 | return ret; | 436 | return ret; |
@@ -551,7 +550,7 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req, | |||
551 | return ret; | 550 | return ret; |
552 | 551 | ||
553 | ret = mv_cesa_queue_req(&req->base); | 552 | ret = mv_cesa_queue_req(&req->base); |
554 | if (ret && ret != -EINPROGRESS) | 553 | if (mv_cesa_req_needs_cleanup(&req->base, ret)) |
555 | mv_cesa_ablkcipher_cleanup(req); | 554 | mv_cesa_ablkcipher_cleanup(req); |
556 | 555 | ||
557 | return ret; | 556 | return ret; |
@@ -693,7 +692,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req, | |||
693 | return ret; | 692 | return ret; |
694 | 693 | ||
695 | ret = mv_cesa_queue_req(&req->base); | 694 | ret = mv_cesa_queue_req(&req->base); |
696 | if (ret && ret != -EINPROGRESS) | 695 | if (mv_cesa_req_needs_cleanup(&req->base, ret)) |
697 | mv_cesa_ablkcipher_cleanup(req); | 696 | mv_cesa_ablkcipher_cleanup(req); |
698 | 697 | ||
699 | return ret; | 698 | return ret; |
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index ae9272eb9c1a..e8d0d7128137 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c | |||
@@ -739,10 +739,8 @@ static int mv_cesa_ahash_update(struct ahash_request *req) | |||
739 | return 0; | 739 | return 0; |
740 | 740 | ||
741 | ret = mv_cesa_queue_req(&req->base); | 741 | ret = mv_cesa_queue_req(&req->base); |
742 | if (ret && ret != -EINPROGRESS) { | 742 | if (mv_cesa_req_needs_cleanup(&req->base, ret)) |
743 | mv_cesa_ahash_cleanup(req); | 743 | mv_cesa_ahash_cleanup(req); |
744 | return ret; | ||
745 | } | ||
746 | 744 | ||
747 | return ret; | 745 | return ret; |
748 | } | 746 | } |
@@ -766,7 +764,7 @@ static int mv_cesa_ahash_final(struct ahash_request *req) | |||
766 | return 0; | 764 | return 0; |
767 | 765 | ||
768 | ret = mv_cesa_queue_req(&req->base); | 766 | ret = mv_cesa_queue_req(&req->base); |
769 | if (ret && ret != -EINPROGRESS) | 767 | if (mv_cesa_req_needs_cleanup(&req->base, ret)) |
770 | mv_cesa_ahash_cleanup(req); | 768 | mv_cesa_ahash_cleanup(req); |
771 | 769 | ||
772 | return ret; | 770 | return ret; |
@@ -791,7 +789,7 @@ static int mv_cesa_ahash_finup(struct ahash_request *req) | |||
791 | return 0; | 789 | return 0; |
792 | 790 | ||
793 | ret = mv_cesa_queue_req(&req->base); | 791 | ret = mv_cesa_queue_req(&req->base); |
794 | if (ret && ret != -EINPROGRESS) | 792 | if (mv_cesa_req_needs_cleanup(&req->base, ret)) |
795 | mv_cesa_ahash_cleanup(req); | 793 | mv_cesa_ahash_cleanup(req); |
796 | 794 | ||
797 | return ret; | 795 | return ret; |
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index a57b4194de28..0a5ca0ba5d64 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c | |||
@@ -88,6 +88,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev) | |||
88 | struct pci_dev *parent = pdev->bus->self; | 88 | struct pci_dev *parent = pdev->bus->self; |
89 | uint16_t bridge_ctl = 0; | 89 | uint16_t bridge_ctl = 0; |
90 | 90 | ||
91 | if (accel_dev->is_vf) | ||
92 | return; | ||
93 | |||
91 | dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", | 94 | dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", |
92 | accel_dev->accel_id); | 95 | accel_dev->accel_id); |
93 | 96 | ||
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index e070c316e8b7..a19ee127edca 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | |||
@@ -104,7 +104,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) | |||
104 | sg_miter_next(&mo); | 104 | sg_miter_next(&mo); |
105 | oo = 0; | 105 | oo = 0; |
106 | } | 106 | } |
107 | } while (mo.length > 0); | 107 | } while (oleft > 0); |
108 | 108 | ||
109 | if (areq->info) { | 109 | if (areq->info) { |
110 | for (i = 0; i < 4 && i < ivsize / 4; i++) { | 110 | for (i = 0; i < 4 && i < ivsize / 4; i++) { |
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index ca1b362d77e2..ca848cc6a8fd 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c | |||
@@ -53,7 +53,7 @@ static struct devfreq *find_device_devfreq(struct device *dev) | |||
53 | { | 53 | { |
54 | struct devfreq *tmp_devfreq; | 54 | struct devfreq *tmp_devfreq; |
55 | 55 | ||
56 | if (unlikely(IS_ERR_OR_NULL(dev))) { | 56 | if (IS_ERR_OR_NULL(dev)) { |
57 | pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); | 57 | pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); |
58 | return ERR_PTR(-EINVAL); | 58 | return ERR_PTR(-EINVAL); |
59 | } | 59 | } |
@@ -133,7 +133,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name) | |||
133 | { | 133 | { |
134 | struct devfreq_governor *tmp_governor; | 134 | struct devfreq_governor *tmp_governor; |
135 | 135 | ||
136 | if (unlikely(IS_ERR_OR_NULL(name))) { | 136 | if (IS_ERR_OR_NULL(name)) { |
137 | pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); | 137 | pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); |
138 | return ERR_PTR(-EINVAL); | 138 | return ERR_PTR(-EINVAL); |
139 | } | 139 | } |
@@ -177,10 +177,10 @@ int update_devfreq(struct devfreq *devfreq) | |||
177 | return err; | 177 | return err; |
178 | 178 | ||
179 | /* | 179 | /* |
180 | * Adjust the freuqency with user freq and QoS. | 180 | * Adjust the frequency with user freq and QoS. |
181 | * | 181 | * |
182 | * List from the highest proiority | 182 | * List from the highest priority |
183 | * max_freq (probably called by thermal when it's too hot) | 183 | * max_freq |
184 | * min_freq | 184 | * min_freq |
185 | */ | 185 | */ |
186 | 186 | ||
@@ -482,7 +482,7 @@ struct devfreq *devfreq_add_device(struct device *dev, | |||
482 | devfreq->profile->max_state * | 482 | devfreq->profile->max_state * |
483 | devfreq->profile->max_state, | 483 | devfreq->profile->max_state, |
484 | GFP_KERNEL); | 484 | GFP_KERNEL); |
485 | devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * | 485 | devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) * |
486 | devfreq->profile->max_state, | 486 | devfreq->profile->max_state, |
487 | GFP_KERNEL); | 487 | GFP_KERNEL); |
488 | devfreq->last_stat_updated = jiffies; | 488 | devfreq->last_stat_updated = jiffies; |
@@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev, | |||
492 | if (err) { | 492 | if (err) { |
493 | put_device(&devfreq->dev); | 493 | put_device(&devfreq->dev); |
494 | mutex_unlock(&devfreq->lock); | 494 | mutex_unlock(&devfreq->lock); |
495 | goto err_dev; | 495 | goto err_out; |
496 | } | 496 | } |
497 | 497 | ||
498 | mutex_unlock(&devfreq->lock); | 498 | mutex_unlock(&devfreq->lock); |
@@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev, | |||
518 | err_init: | 518 | err_init: |
519 | list_del(&devfreq->node); | 519 | list_del(&devfreq->node); |
520 | device_unregister(&devfreq->dev); | 520 | device_unregister(&devfreq->dev); |
521 | err_dev: | ||
522 | kfree(devfreq); | 521 | kfree(devfreq); |
523 | err_out: | 522 | err_out: |
524 | return ERR_PTR(err); | 523 | return ERR_PTR(err); |
@@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, | |||
795 | ret = PTR_ERR(governor); | 794 | ret = PTR_ERR(governor); |
796 | goto out; | 795 | goto out; |
797 | } | 796 | } |
798 | if (df->governor == governor) | 797 | if (df->governor == governor) { |
798 | ret = 0; | ||
799 | goto out; | 799 | goto out; |
800 | } | ||
800 | 801 | ||
801 | if (df->governor) { | 802 | if (df->governor) { |
802 | ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); | 803 | ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); |
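A note on the governor_store hunk above: when the requested governor is already the active one, the function now clears ret before taking the shared exit path, so the sysfs write reports success instead of falling through with a previously assigned value. A minimal sketch of that store pattern, with hypothetical helpers (current_setting, apply_setting) that are not part of the driver:

    static ssize_t example_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
        int ret;
        int requested;

        ret = kstrtoint(buf, 10, &requested);
        if (ret)
            goto out;

        if (requested == current_setting) {   /* nothing to change ... */
            ret = 0;                          /* ... but still report success */
            goto out;
        }

        ret = apply_setting(requested);       /* hypothetical helper */
    out:
        return ret ? ret : count;             /* sysfs stores return count on success */
    }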
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index f9901f52a225..f312485f1451 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c | |||
@@ -319,7 +319,8 @@ static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev, | |||
319 | case PPMU_PMNCNT3: | 319 | case PPMU_PMNCNT3: |
320 | pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH); | 320 | pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH); |
321 | pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW); | 321 | pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW); |
322 | load_count = (u64)((pmcnt_high & 0xff) << 32) + (u64)pmcnt_low; | 322 | load_count = ((u64)((pmcnt_high & 0xff)) << 32) |
323 | + (u64)pmcnt_low; | ||
323 | break; | 324 | break; |
324 | } | 325 | } |
325 | edata->load_count = load_count; | 326 | edata->load_count = load_count; |
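The load_count change above is a classic integer-width fix: the old expression shifted a 32-bit value left by 32 and only then cast the result to u64, so the high byte of the counter was lost (and a shift by the full width is undefined behaviour in C). The widening cast has to happen before the shift. A self-contained illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pmcnt_high = 0x12, pmcnt_low = 0x89abcdefu;

        /* Wrong: (pmcnt_high & 0xff) << 32 is evaluated in 32 bits;
         * the shift is undefined and the high byte never reaches the result
         * (most compilers warn here). */
        uint64_t broken = (uint64_t)((pmcnt_high & 0xff) << 32) + (uint64_t)pmcnt_low;

        /* Right: widen to 64 bits first, then shift. */
        uint64_t fixed = ((uint64_t)(pmcnt_high & 0xff) << 32) + (uint64_t)pmcnt_low;

        printf("broken=0x%llx fixed=0x%llx\n",
               (unsigned long long)broken, (unsigned long long)fixed);
        return 0;
    }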
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index 0720ba84ca92..ae72ba5e78df 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c | |||
@@ -21,17 +21,20 @@ | |||
21 | static int devfreq_simple_ondemand_func(struct devfreq *df, | 21 | static int devfreq_simple_ondemand_func(struct devfreq *df, |
22 | unsigned long *freq) | 22 | unsigned long *freq) |
23 | { | 23 | { |
24 | struct devfreq_dev_status stat; | 24 | int err; |
25 | int err = df->profile->get_dev_status(df->dev.parent, &stat); | 25 | struct devfreq_dev_status *stat; |
26 | unsigned long long a, b; | 26 | unsigned long long a, b; |
27 | unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD; | 27 | unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD; |
28 | unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL; | 28 | unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL; |
29 | struct devfreq_simple_ondemand_data *data = df->data; | 29 | struct devfreq_simple_ondemand_data *data = df->data; |
30 | unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX; | 30 | unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX; |
31 | 31 | ||
32 | err = devfreq_update_stats(df); | ||
32 | if (err) | 33 | if (err) |
33 | return err; | 34 | return err; |
34 | 35 | ||
36 | stat = &df->last_status; | ||
37 | |||
35 | if (data) { | 38 | if (data) { |
36 | if (data->upthreshold) | 39 | if (data->upthreshold) |
37 | dfso_upthreshold = data->upthreshold; | 40 | dfso_upthreshold = data->upthreshold; |
@@ -43,41 +46,41 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, | |||
43 | return -EINVAL; | 46 | return -EINVAL; |
44 | 47 | ||
45 | /* Assume MAX if it is going to be divided by zero */ | 48 | /* Assume MAX if it is going to be divided by zero */ |
46 | if (stat.total_time == 0) { | 49 | if (stat->total_time == 0) { |
47 | *freq = max; | 50 | *freq = max; |
48 | return 0; | 51 | return 0; |
49 | } | 52 | } |
50 | 53 | ||
51 | /* Prevent overflow */ | 54 | /* Prevent overflow */ |
52 | if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) { | 55 | if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) { |
53 | stat.busy_time >>= 7; | 56 | stat->busy_time >>= 7; |
54 | stat.total_time >>= 7; | 57 | stat->total_time >>= 7; |
55 | } | 58 | } |
56 | 59 | ||
57 | /* Set MAX if it's busy enough */ | 60 | /* Set MAX if it's busy enough */ |
58 | if (stat.busy_time * 100 > | 61 | if (stat->busy_time * 100 > |
59 | stat.total_time * dfso_upthreshold) { | 62 | stat->total_time * dfso_upthreshold) { |
60 | *freq = max; | 63 | *freq = max; |
61 | return 0; | 64 | return 0; |
62 | } | 65 | } |
63 | 66 | ||
64 | /* Set MAX if we do not know the initial frequency */ | 67 | /* Set MAX if we do not know the initial frequency */ |
65 | if (stat.current_frequency == 0) { | 68 | if (stat->current_frequency == 0) { |
66 | *freq = max; | 69 | *freq = max; |
67 | return 0; | 70 | return 0; |
68 | } | 71 | } |
69 | 72 | ||
70 | /* Keep the current frequency */ | 73 | /* Keep the current frequency */ |
71 | if (stat.busy_time * 100 > | 74 | if (stat->busy_time * 100 > |
72 | stat.total_time * (dfso_upthreshold - dfso_downdifferential)) { | 75 | stat->total_time * (dfso_upthreshold - dfso_downdifferential)) { |
73 | *freq = stat.current_frequency; | 76 | *freq = stat->current_frequency; |
74 | return 0; | 77 | return 0; |
75 | } | 78 | } |
76 | 79 | ||
77 | /* Set the desired frequency based on the load */ | 80 | /* Set the desired frequency based on the load */ |
78 | a = stat.busy_time; | 81 | a = stat->busy_time; |
79 | a *= stat.current_frequency; | 82 | a *= stat->current_frequency; |
80 | b = div_u64(a, stat.total_time); | 83 | b = div_u64(a, stat->total_time); |
81 | b *= 100; | 84 | b *= 100; |
82 | b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2)); | 85 | b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2)); |
83 | *freq = (unsigned long) b; | 86 | *freq = (unsigned long) b; |
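The simple-ondemand hunks above only change where the status snapshot comes from: the cached df->last_status filled by devfreq_update_stats(), instead of a fresh get_dev_status() call into a stack-local struct. The frequency math itself is untouched. For reference, a standalone version of that calculation, with the threshold constants assumed rather than read from the driver:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int upthreshold = 90, downdifferential = 5;
        unsigned long long busy = 600, total = 1000, cur_freq = 400000; /* kHz */

        /* Scale the current frequency by the measured load, normalised so the
         * device settles just below the up-threshold. */
        unsigned long long a = busy * cur_freq;
        unsigned long long b = (a / total) * 100;
        unsigned long long target = b / (upthreshold - downdifferential / 2);

        printf("load=%llu%% target=%llu kHz\n", busy * 100 / total, target);
        return 0;
    }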
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c index 13a1a6e8108c..848b93ee930f 100644 --- a/drivers/devfreq/tegra-devfreq.c +++ b/drivers/devfreq/tegra-devfreq.c | |||
@@ -541,18 +541,20 @@ static struct devfreq_dev_profile tegra_devfreq_profile = { | |||
541 | static int tegra_governor_get_target(struct devfreq *devfreq, | 541 | static int tegra_governor_get_target(struct devfreq *devfreq, |
542 | unsigned long *freq) | 542 | unsigned long *freq) |
543 | { | 543 | { |
544 | struct devfreq_dev_status stat; | 544 | struct devfreq_dev_status *stat; |
545 | struct tegra_devfreq *tegra; | 545 | struct tegra_devfreq *tegra; |
546 | struct tegra_devfreq_device *dev; | 546 | struct tegra_devfreq_device *dev; |
547 | unsigned long target_freq = 0; | 547 | unsigned long target_freq = 0; |
548 | unsigned int i; | 548 | unsigned int i; |
549 | int err; | 549 | int err; |
550 | 550 | ||
551 | err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat); | 551 | err = devfreq_update_stats(devfreq); |
552 | if (err) | 552 | if (err) |
553 | return err; | 553 | return err; |
554 | 554 | ||
555 | tegra = stat.private_data; | 555 | stat = &devfreq->last_status; |
556 | |||
557 | tegra = stat->private_data; | ||
556 | 558 | ||
557 | for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) { | 559 | for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) { |
558 | dev = &tegra->devices[i]; | 560 | dev = &tegra->devices[i]; |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index a165b4bfd330..dd24375b76dd 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, | |||
455 | return desc; | 455 | return desc; |
456 | } | 456 | } |
457 | 457 | ||
458 | void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) | ||
459 | { | ||
460 | memset(&desc->lld, 0, sizeof(desc->lld)); | ||
461 | INIT_LIST_HEAD(&desc->descs_list); | ||
462 | desc->direction = DMA_TRANS_NONE; | ||
463 | desc->xfer_size = 0; | ||
464 | desc->active_xfer = false; | ||
465 | } | ||
466 | |||
458 | /* Call must be protected by lock. */ | 467 | /* Call must be protected by lock. */ |
459 | static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) | 468 | static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) |
460 | { | 469 | { |
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) | |||
466 | desc = list_first_entry(&atchan->free_descs_list, | 475 | desc = list_first_entry(&atchan->free_descs_list, |
467 | struct at_xdmac_desc, desc_node); | 476 | struct at_xdmac_desc, desc_node); |
468 | list_del(&desc->desc_node); | 477 | list_del(&desc->desc_node); |
469 | desc->active_xfer = false; | 478 | at_xdmac_init_used_desc(desc); |
470 | } | 479 | } |
471 | 480 | ||
472 | return desc; | 481 | return desc; |
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, | |||
875 | 884 | ||
876 | if (xt->src_inc) { | 885 | if (xt->src_inc) { |
877 | if (xt->src_sgl) | 886 | if (xt->src_sgl) |
878 | chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM; | 887 | chan_cc |= AT_XDMAC_CC_SAM_UBS_AM; |
879 | else | 888 | else |
880 | chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; | 889 | chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; |
881 | } | 890 | } |
882 | 891 | ||
883 | if (xt->dst_inc) { | 892 | if (xt->dst_inc) { |
884 | if (xt->dst_sgl) | 893 | if (xt->dst_sgl) |
885 | chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM; | 894 | chan_cc |= AT_XDMAC_CC_DAM_UBS_AM; |
886 | else | 895 | else |
887 | chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; | 896 | chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; |
888 | } | 897 | } |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ff284c8e3d5..09479d4be4db 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) | |||
554 | mutex_lock(&dma_list_mutex); | 554 | mutex_lock(&dma_list_mutex); |
555 | 555 | ||
556 | if (chan->client_count == 0) { | 556 | if (chan->client_count == 0) { |
557 | struct dma_device *device = chan->device; | ||
558 | |||
559 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | ||
560 | device->privatecnt++; | ||
557 | err = dma_chan_get(chan); | 561 | err = dma_chan_get(chan); |
558 | if (err) | 562 | if (err) { |
559 | pr_debug("%s: failed to get %s: (%d)\n", | 563 | pr_debug("%s: failed to get %s: (%d)\n", |
560 | __func__, dma_chan_name(chan), err); | 564 | __func__, dma_chan_name(chan), err); |
565 | chan = NULL; | ||
566 | if (--device->privatecnt == 0) | ||
567 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); | ||
568 | } | ||
561 | } else | 569 | } else |
562 | chan = NULL; | 570 | chan = NULL; |
563 | 571 | ||
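The dmaengine change marks the device DMA_PRIVATE and bumps privatecnt before trying to take the channel, and undoes both if dma_chan_get() fails. The general shape is acquire-then-roll-back; a rough sketch with hypothetical helpers, not the dmaengine code itself:

    static int example_grab_resource(struct example_dev *dev)
    {
        int err;

        set_private_flag(dev);          /* state the grab depends on */
        dev->privatecnt++;

        err = try_grab(dev);            /* hypothetical helper */
        if (err) {
            /* Failure: restore exactly what was changed above. */
            if (--dev->privatecnt == 0)
                clear_private_flag(dev);
        }
        return err;
    }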
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cf1c87fa1edd..bedce038c6e2 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1591 | INIT_LIST_HEAD(&dw->dma.channels); | 1591 | INIT_LIST_HEAD(&dw->dma.channels); |
1592 | for (i = 0; i < nr_channels; i++) { | 1592 | for (i = 0; i < nr_channels; i++) { |
1593 | struct dw_dma_chan *dwc = &dw->chan[i]; | 1593 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1594 | int r = nr_channels - i - 1; | ||
1595 | 1594 | ||
1596 | dwc->chan.device = &dw->dma; | 1595 | dwc->chan.device = &dw->dma; |
1597 | dma_cookie_init(&dwc->chan); | 1596 | dma_cookie_init(&dwc->chan); |
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1603 | 1602 | ||
1604 | /* 7 is highest priority & 0 is lowest. */ | 1603 | /* 7 is highest priority & 0 is lowest. */ |
1605 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | 1604 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) |
1606 | dwc->priority = r; | 1605 | dwc->priority = nr_channels - i - 1; |
1607 | else | 1606 | else |
1608 | dwc->priority = i; | 1607 | dwc->priority = i; |
1609 | 1608 | ||
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1622 | /* Hardware configuration */ | 1621 | /* Hardware configuration */ |
1623 | if (autocfg) { | 1622 | if (autocfg) { |
1624 | unsigned int dwc_params; | 1623 | unsigned int dwc_params; |
1624 | unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; | ||
1625 | void __iomem *addr = chip->regs + r * sizeof(u32); | 1625 | void __iomem *addr = chip->regs + r * sizeof(u32); |
1626 | 1626 | ||
1627 | dwc_params = dma_read_byaddr(addr, DWC_PARAMS); | 1627 | dwc_params = dma_read_byaddr(addr, DWC_PARAMS); |
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 18c14e1f1414..48d6d9e94f67 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c | |||
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c) | |||
355 | struct idma64_desc *desc = idma64c->desc; | 355 | struct idma64_desc *desc = idma64c->desc; |
356 | struct idma64_hw_desc *hw; | 356 | struct idma64_hw_desc *hw; |
357 | size_t bytes = desc->length; | 357 | size_t bytes = desc->length; |
358 | u64 llp; | 358 | u64 llp = channel_readq(idma64c, LLP); |
359 | u32 ctlhi; | 359 | u32 ctlhi = channel_readl(idma64c, CTL_HI); |
360 | unsigned int i = 0; | 360 | unsigned int i = 0; |
361 | 361 | ||
362 | llp = channel_readq(idma64c, LLP); | ||
363 | do { | 362 | do { |
364 | hw = &desc->hw[i]; | 363 | hw = &desc->hw[i]; |
365 | } while ((hw->llp != llp) && (++i < desc->ndesc)); | 364 | if (hw->llp == llp) |
365 | break; | ||
366 | bytes -= hw->len; | ||
367 | } while (++i < desc->ndesc); | ||
366 | 368 | ||
367 | if (!i) | 369 | if (!i) |
368 | return bytes; | 370 | return bytes; |
369 | 371 | ||
370 | do { | 372 | /* The current chunk is not fully transfered yet */ |
371 | bytes -= desc->hw[--i].len; | 373 | bytes += desc->hw[--i].len; |
372 | } while (i); | ||
373 | 374 | ||
374 | ctlhi = channel_readl(idma64c, CTL_HI); | ||
375 | return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); | 375 | return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); |
376 | } | 376 | } |
377 | 377 | ||
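The idma64_active_desc_size() rework reads LLP and CTL_HI up front and walks the descriptor chain once, accumulating the residue as it goes instead of making a second backwards pass. In general a slave-DMA residue is "bytes not yet transferred": the chunks that have not started plus whatever remains of the chunk currently in flight. A generic sketch of that bookkeeping, not the driver's algorithm (fields and helpers are hypothetical):

    struct example_desc {
        unsigned int nchunks;
        size_t chunk_len[8];
    };

    static size_t example_residue(const struct example_desc *desc,
                                  unsigned int in_flight_idx,
                                  size_t in_flight_done)
    {
        size_t bytes = 0;
        unsigned int i;

        /* Chunks after the in-flight one have not started at all. */
        for (i = in_flight_idx + 1; i < desc->nchunks; i++)
            bytes += desc->chunk_len[i];

        /* Plus what is left of the chunk currently being transferred. */
        bytes += desc->chunk_len[in_flight_idx] - in_flight_done;

        return bytes;
    }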
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index 4768a829253a..2bf37e68ad0f 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c | |||
@@ -266,7 +266,7 @@ int ipu_irq_unmap(unsigned int source) | |||
266 | } | 266 | } |
267 | 267 | ||
268 | /* Chained IRQ handler for IPU function and error interrupt */ | 268 | /* Chained IRQ handler for IPU function and error interrupt */ |
269 | static void ipu_irq_handler(unsigned int __irq, struct irq_desc *desc) | 269 | static void ipu_irq_handler(struct irq_desc *desc) |
270 | { | 270 | { |
271 | struct ipu *ipu = irq_desc_get_handler_data(desc); | 271 | struct ipu *ipu = irq_desc_get_handler_data(desc); |
272 | u32 status; | 272 | u32 status; |
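This is the first of many hunks in this series converting chained IRQ handlers from the old two-argument form to the new prototype that takes only the irq_desc; the IRQ number, the chip and the handler data are all retrieved from the descriptor. The same mechanical change repeats through the GPIO drivers below. A converted handler roughly looks like this (generic sketch, not any one driver; example_priv is a placeholder type):

    static void example_chained_handler(struct irq_desc *desc)
    {
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct example_priv *priv = irq_desc_get_handler_data(desc);
        unsigned int irq = irq_desc_get_irq(desc);  /* only if the number is still needed */

        chained_irq_enter(chip, desc);
        /* ... demultiplex priv's pending sources and call generic_handle_irq() ... */
        chained_irq_exit(chip, desc);
    }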
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 5cb61ce01036..fc4156afa070 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c | |||
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan) | |||
473 | return; | 473 | return; |
474 | 474 | ||
475 | /* clear the channel mapping in DRCMR */ | 475 | /* clear the channel mapping in DRCMR */ |
476 | reg = pxad_drcmr(chan->drcmr); | 476 | if (chan->drcmr <= DRCMR_CHLNUM) { |
477 | writel_relaxed(0, chan->phy->base + reg); | 477 | reg = pxad_drcmr(chan->drcmr); |
478 | writel_relaxed(0, chan->phy->base + reg); | ||
479 | } | ||
478 | 480 | ||
479 | spin_lock_irqsave(&pdev->phy_lock, flags); | 481 | spin_lock_irqsave(&pdev->phy_lock, flags); |
480 | for (i = 0; i < 32; i++) | 482 | for (i = 0; i < 32; i++) |
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned) | |||
516 | "%s(); phy=%p(%d) misaligned=%d\n", __func__, | 518 | "%s(); phy=%p(%d) misaligned=%d\n", __func__, |
517 | phy, phy->idx, misaligned); | 519 | phy, phy->idx, misaligned); |
518 | 520 | ||
519 | reg = pxad_drcmr(phy->vchan->drcmr); | 521 | if (phy->vchan->drcmr <= DRCMR_CHLNUM) { |
520 | writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); | 522 | reg = pxad_drcmr(phy->vchan->drcmr); |
523 | writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); | ||
524 | } | ||
521 | 525 | ||
522 | dalgn = phy_readl_relaxed(phy, DALGN); | 526 | dalgn = phy_readl_relaxed(phy, DALGN); |
523 | if (misaligned) | 527 | if (misaligned) |
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd, | |||
887 | struct dma_async_tx_descriptor *tx; | 891 | struct dma_async_tx_descriptor *tx; |
888 | struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); | 892 | struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); |
889 | 893 | ||
894 | INIT_LIST_HEAD(&vd->node); | ||
890 | tx = vchan_tx_prep(vc, vd, tx_flags); | 895 | tx = vchan_tx_prep(vc, vd, tx_flags); |
891 | tx->tx_submit = pxad_tx_submit; | 896 | tx->tx_submit = pxad_tx_submit; |
892 | dev_dbg(&chan->vc.chan.dev->device, | 897 | dev_dbg(&chan->vc.chan.dev->device, |
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan, | |||
910 | width = chan->cfg.src_addr_width; | 915 | width = chan->cfg.src_addr_width; |
911 | dev_addr = chan->cfg.src_addr; | 916 | dev_addr = chan->cfg.src_addr; |
912 | *dev_src = dev_addr; | 917 | *dev_src = dev_addr; |
913 | *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; | 918 | *dcmd |= PXA_DCMD_INCTRGADDR; |
919 | if (chan->drcmr <= DRCMR_CHLNUM) | ||
920 | *dcmd |= PXA_DCMD_FLOWSRC; | ||
914 | } | 921 | } |
915 | if (dir == DMA_MEM_TO_DEV) { | 922 | if (dir == DMA_MEM_TO_DEV) { |
916 | maxburst = chan->cfg.dst_maxburst; | 923 | maxburst = chan->cfg.dst_maxburst; |
917 | width = chan->cfg.dst_addr_width; | 924 | width = chan->cfg.dst_addr_width; |
918 | dev_addr = chan->cfg.dst_addr; | 925 | dev_addr = chan->cfg.dst_addr; |
919 | *dev_dst = dev_addr; | 926 | *dev_dst = dev_addr; |
920 | *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; | 927 | *dcmd |= PXA_DCMD_INCSRCADDR; |
928 | if (chan->drcmr <= DRCMR_CHLNUM) | ||
929 | *dcmd |= PXA_DCMD_FLOWTRG; | ||
921 | } | 930 | } |
922 | if (dir == DMA_MEM_TO_MEM) | 931 | if (dir == DMA_MEM_TO_MEM) |
923 | *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | | 932 | *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan, | |||
1177 | else | 1186 | else |
1178 | curr = phy_readl_relaxed(chan->phy, DTADR); | 1187 | curr = phy_readl_relaxed(chan->phy, DTADR); |
1179 | 1188 | ||
1189 | /* | ||
1190 | * curr has to be actually read before checking descriptor | ||
1191 | * completion, so that a curr inside a status updater | ||
1192 | * descriptor implies the following test returns true, and | ||
1193 | * preventing reordering of curr load and the test. | ||
1194 | */ | ||
1195 | rmb(); | ||
1196 | if (is_desc_completed(vd)) | ||
1197 | goto out; | ||
1198 | |||
1180 | for (i = 0; i < sw_desc->nb_desc - 1; i++) { | 1199 | for (i = 0; i < sw_desc->nb_desc - 1; i++) { |
1181 | hw_desc = sw_desc->hw_desc[i]; | 1200 | hw_desc = sw_desc->hw_desc[i]; |
1182 | if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) | 1201 | if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) |
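The new rmb() in pxad_residue() enforces the ordering the comment describes: the hardware's current transfer address must be read before the descriptor's completion status, otherwise the two loads could be reordered and the residue would be computed against a stale notion of "completed". The shape of the pattern, sketched with hypothetical helpers:

    static unsigned int example_residue(struct example_chan *chan,
                                        struct example_desc *vd)
    {
        u32 curr;

        /* Load the engine's current address first ... */
        curr = readl_relaxed(chan->progress_reg);

        /* ... and only then look at the completion flag, so the two reads
         * cannot be reordered against each other. */
        rmb();

        if (descriptor_completed(vd))        /* hypothetical helper */
            return 0;                        /* fully done: no residue */

        return bytes_left_after(vd, curr);   /* hypothetical helper */
    }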
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index a1a500d96ff2..1661d518224a 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c | |||
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract) | |||
599 | static void sun4i_dma_free_contract(struct virt_dma_desc *vd) | 599 | static void sun4i_dma_free_contract(struct virt_dma_desc *vd) |
600 | { | 600 | { |
601 | struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); | 601 | struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); |
602 | struct sun4i_dma_promise *promise; | 602 | struct sun4i_dma_promise *promise, *tmp; |
603 | 603 | ||
604 | /* Free all the demands and completed demands */ | 604 | /* Free all the demands and completed demands */ |
605 | list_for_each_entry(promise, &contract->demands, list) | 605 | list_for_each_entry_safe(promise, tmp, &contract->demands, list) |
606 | kfree(promise); | 606 | kfree(promise); |
607 | 607 | ||
608 | list_for_each_entry(promise, &contract->completed_demands, list) | 608 | list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list) |
609 | kfree(promise); | 609 | kfree(promise); |
610 | 610 | ||
611 | kfree(contract); | 611 | kfree(contract); |
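The switch to list_for_each_entry_safe() above is needed because the loop body frees the current entry; the plain iterator would dereference the just-freed node to advance, a use-after-free. The _safe variant caches the next node in a second cursor before the body runs:

    struct promise {
        struct list_head list;
        /* ... payload ... */
    };

    static void free_all(struct list_head *head)
    {
        struct promise *p, *tmp;

        /* 'tmp' already holds the next node, so freeing 'p' here is safe. */
        list_for_each_entry_safe(p, tmp, head, list)
            kfree(p);
    }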
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index b23e8d52d126..8d57b1b12e41 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c | |||
@@ -59,7 +59,6 @@ | |||
59 | #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 | 59 | #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 |
60 | #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 | 60 | #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 |
61 | #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF | 61 | #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF |
62 | #define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1) | ||
63 | #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) | 62 | #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) |
64 | #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) | 63 | #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) |
65 | #define XGENE_DMA_RING_CMD_OFFSET 0x2C | 64 | #define XGENE_DMA_RING_CMD_OFFSET 0x2C |
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) | |||
379 | return flyby_type[src_cnt]; | 378 | return flyby_type[src_cnt]; |
380 | } | 379 | } |
381 | 380 | ||
382 | static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) | ||
383 | { | ||
384 | u32 __iomem *cmd_base = ring->cmd_base; | ||
385 | u32 ring_state = ioread32(&cmd_base[1]); | ||
386 | |||
387 | return XGENE_DMA_RING_DESC_CNT(ring_state); | ||
388 | } | ||
389 | |||
390 | static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, | 381 | static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, |
391 | dma_addr_t *paddr) | 382 | dma_addr_t *paddr) |
392 | { | 383 | { |
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan, | |||
659 | dma_pool_free(chan->desc_pool, desc, desc->tx.phys); | 650 | dma_pool_free(chan->desc_pool, desc, desc->tx.phys); |
660 | } | 651 | } |
661 | 652 | ||
662 | static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, | 653 | static void xgene_chan_xfer_request(struct xgene_dma_chan *chan, |
663 | struct xgene_dma_desc_sw *desc_sw) | 654 | struct xgene_dma_desc_sw *desc_sw) |
664 | { | 655 | { |
656 | struct xgene_dma_ring *ring = &chan->tx_ring; | ||
665 | struct xgene_dma_desc_hw *desc_hw; | 657 | struct xgene_dma_desc_hw *desc_hw; |
666 | 658 | ||
667 | /* Check if can push more descriptor to hw for execution */ | ||
668 | if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) | ||
669 | return -EBUSY; | ||
670 | |||
671 | /* Get hw descriptor from DMA tx ring */ | 659 | /* Get hw descriptor from DMA tx ring */ |
672 | desc_hw = &ring->desc_hw[ring->head]; | 660 | desc_hw = &ring->desc_hw[ring->head]; |
673 | 661 | ||
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, | |||
694 | memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); | 682 | memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); |
695 | } | 683 | } |
696 | 684 | ||
685 | /* Increment the pending transaction count */ | ||
686 | chan->pending += ((desc_sw->flags & | ||
687 | XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); | ||
688 | |||
697 | /* Notify the hw that we have descriptor ready for execution */ | 689 | /* Notify the hw that we have descriptor ready for execution */ |
698 | iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? | 690 | iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? |
699 | 2 : 1, ring->cmd); | 691 | 2 : 1, ring->cmd); |
700 | |||
701 | return 0; | ||
702 | } | 692 | } |
703 | 693 | ||
704 | /** | 694 | /** |
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, | |||
710 | static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) | 700 | static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) |
711 | { | 701 | { |
712 | struct xgene_dma_desc_sw *desc_sw, *_desc_sw; | 702 | struct xgene_dma_desc_sw *desc_sw, *_desc_sw; |
713 | int ret; | ||
714 | 703 | ||
715 | /* | 704 | /* |
716 | * If the list of pending descriptors is empty, then we | 705 | * If the list of pending descriptors is empty, then we |
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) | |||
735 | if (chan->pending >= chan->max_outstanding) | 724 | if (chan->pending >= chan->max_outstanding) |
736 | return; | 725 | return; |
737 | 726 | ||
738 | ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); | 727 | xgene_chan_xfer_request(chan, desc_sw); |
739 | if (ret) | ||
740 | return; | ||
741 | 728 | ||
742 | /* | 729 | /* |
743 | * Delete this element from ld pending queue and append it to | 730 | * Delete this element from ld pending queue and append it to |
744 | * ld running queue | 731 | * ld running queue |
745 | */ | 732 | */ |
746 | list_move_tail(&desc_sw->node, &chan->ld_running); | 733 | list_move_tail(&desc_sw->node, &chan->ld_running); |
747 | |||
748 | /* Increment the pending transaction count */ | ||
749 | chan->pending++; | ||
750 | } | 734 | } |
751 | } | 735 | } |
752 | 736 | ||
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) | |||
821 | * Decrement the pending transaction count | 805 | * Decrement the pending transaction count |
822 | * as we have processed one | 806 | * as we have processed one |
823 | */ | 807 | */ |
824 | chan->pending--; | 808 | chan->pending -= ((desc_sw->flags & |
809 | XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); | ||
825 | 810 | ||
826 | /* | 811 | /* |
827 | * Delete this node from ld running queue and append it to | 812 | * Delete this node from ld running queue and append it to |
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, | |||
1421 | struct xgene_dma_ring *ring, | 1406 | struct xgene_dma_ring *ring, |
1422 | enum xgene_dma_ring_cfgsize cfgsize) | 1407 | enum xgene_dma_ring_cfgsize cfgsize) |
1423 | { | 1408 | { |
1409 | int ret; | ||
1410 | |||
1424 | /* Setup DMA ring descriptor variables */ | 1411 | /* Setup DMA ring descriptor variables */ |
1425 | ring->pdma = chan->pdma; | 1412 | ring->pdma = chan->pdma; |
1426 | ring->cfgsize = cfgsize; | 1413 | ring->cfgsize = cfgsize; |
1427 | ring->num = chan->pdma->ring_num++; | 1414 | ring->num = chan->pdma->ring_num++; |
1428 | ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); | 1415 | ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); |
1429 | 1416 | ||
1430 | ring->size = xgene_dma_get_ring_size(chan, cfgsize); | 1417 | ret = xgene_dma_get_ring_size(chan, cfgsize); |
1431 | if (ring->size <= 0) | 1418 | if (ret <= 0) |
1432 | return ring->size; | 1419 | return ret; |
1420 | ring->size = ret; | ||
1433 | 1421 | ||
1434 | /* Allocate memory for DMA ring descriptor */ | 1422 | /* Allocate memory for DMA ring descriptor */ |
1435 | ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, | 1423 | ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, |
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan) | |||
1482 | tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); | 1470 | tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); |
1483 | 1471 | ||
1484 | /* Set the max outstanding request possible to this channel */ | 1472 | /* Set the max outstanding request possible to this channel */ |
1485 | chan->max_outstanding = rx_ring->slots; | 1473 | chan->max_outstanding = tx_ring->slots; |
1486 | 1474 | ||
1487 | return ret; | 1475 | return ret; |
1488 | } | 1476 | } |
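The xgene-dma changes make the outstanding-request accounting consistent: a 64-byte descriptor occupies two ring slots, so the pending count is charged and released by the same 1-or-2 amount on the submit and completion paths, and the limit is taken from the TX ring it actually guards. Keeping both sides symmetric is the point; in miniature (desc_slots() is a hypothetical helper returning 1 or 2):

    static void example_submit(struct example_chan *chan, struct example_desc *d)
    {
        chan->pending += desc_slots(d);
        /* ... write the descriptor into the ring and ring the doorbell ... */
    }

    static void example_complete(struct example_chan *chan, struct example_desc *d)
    {
        /* ... reclaim the descriptor ... */
        chan->pending -= desc_slots(d);
    }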
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index 39915a6b7986..c017fcd8e07c 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c | |||
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec, | |||
739 | struct dma_chan *chan; | 739 | struct dma_chan *chan; |
740 | struct zx_dma_chan *c; | 740 | struct zx_dma_chan *c; |
741 | 741 | ||
742 | if (request > d->dma_requests) | 742 | if (request >= d->dma_requests) |
743 | return NULL; | 743 | return NULL; |
744 | 744 | ||
745 | chan = dma_get_any_slave_channel(&d->slave); | 745 | chan = dma_get_any_slave_channel(&d->slave); |
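The zx296702 change is a one-character off-by-one fix: a request line used as an index into a table of d->dma_requests entries must be rejected when it is equal to the size, not only when it exceeds it. A self-contained illustration:

    #include <stddef.h>

    static const int table[4] = { 10, 20, 30, 40 };

    const int *lookup(size_t idx)
    {
        /* Valid indices run 0 .. 3; idx == 4 must be rejected too,
         * which is why the check is '>=' rather than '>'. */
        if (idx >= 4)
            return NULL;
        return &table[idx];
    }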
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index a07addde297b..8dd0af1d50bc 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c | |||
@@ -159,7 +159,7 @@ static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) | |||
159 | static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached) | 159 | static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached) |
160 | { | 160 | { |
161 | if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) { | 161 | if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) { |
162 | *attached = new ? true : false; | 162 | *attached = ((new >> idx) & 0x1) ? true : false; |
163 | return true; | 163 | return true; |
164 | } | 164 | } |
165 | 165 | ||
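The extcon fix tests the specific cable bit rather than the whole state word: previously *attached came out true whenever any bit in the new state was set, even if the cable at idx had just been detached. Extracting the bit of interest first gives the right answer:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t prev = 0x3, new = 0x1;   /* bit 1 was just detached */
        int idx = 1;

        bool changed  = ((prev >> idx) & 0x1) != ((new >> idx) & 0x1);
        bool attached = (new >> idx) & 0x1;  /* the bit itself, not the whole word */

        printf("changed=%d attached=%d\n", changed, attached);  /* prints 1, 0 */
        return 0;
    }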
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index d8de6a8dd4de..665efca59487 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig | |||
@@ -139,6 +139,14 @@ config QCOM_SCM | |||
139 | bool | 139 | bool |
140 | depends on ARM || ARM64 | 140 | depends on ARM || ARM64 |
141 | 141 | ||
142 | config QCOM_SCM_32 | ||
143 | def_bool y | ||
144 | depends on QCOM_SCM && ARM | ||
145 | |||
146 | config QCOM_SCM_64 | ||
147 | def_bool y | ||
148 | depends on QCOM_SCM && ARM64 | ||
149 | |||
142 | source "drivers/firmware/broadcom/Kconfig" | 150 | source "drivers/firmware/broadcom/Kconfig" |
143 | source "drivers/firmware/google/Kconfig" | 151 | source "drivers/firmware/google/Kconfig" |
144 | source "drivers/firmware/efi/Kconfig" | 152 | source "drivers/firmware/efi/Kconfig" |
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 000830fc6707..2ee83474a3c1 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile | |||
@@ -13,7 +13,8 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o | |||
13 | obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o | 13 | obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o |
14 | obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o | 14 | obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o |
15 | obj-$(CONFIG_QCOM_SCM) += qcom_scm.o | 15 | obj-$(CONFIG_QCOM_SCM) += qcom_scm.o |
16 | obj-$(CONFIG_QCOM_SCM) += qcom_scm-32.o | 16 | obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o |
17 | obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o | ||
17 | CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) | 18 | CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) |
18 | 19 | ||
19 | obj-y += broadcom/ | 20 | obj-y += broadcom/ |
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index e29560e6b40b..950c87f5d279 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/efi.h> | 15 | #include <linux/efi.h> |
16 | #include <linux/sort.h> | ||
16 | #include <asm/efi.h> | 17 | #include <asm/efi.h> |
17 | 18 | ||
18 | #include "efistub.h" | 19 | #include "efistub.h" |
@@ -305,6 +306,44 @@ fail: | |||
305 | */ | 306 | */ |
306 | #define EFI_RT_VIRTUAL_BASE 0x40000000 | 307 | #define EFI_RT_VIRTUAL_BASE 0x40000000 |
307 | 308 | ||
309 | static int cmp_mem_desc(const void *l, const void *r) | ||
310 | { | ||
311 | const efi_memory_desc_t *left = l, *right = r; | ||
312 | |||
313 | return (left->phys_addr > right->phys_addr) ? 1 : -1; | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * Returns whether region @left ends exactly where region @right starts, | ||
318 | * or false if either argument is NULL. | ||
319 | */ | ||
320 | static bool regions_are_adjacent(efi_memory_desc_t *left, | ||
321 | efi_memory_desc_t *right) | ||
322 | { | ||
323 | u64 left_end; | ||
324 | |||
325 | if (left == NULL || right == NULL) | ||
326 | return false; | ||
327 | |||
328 | left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE; | ||
329 | |||
330 | return left_end == right->phys_addr; | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * Returns whether region @left and region @right have compatible memory type | ||
335 | * mapping attributes, and are both EFI_MEMORY_RUNTIME regions. | ||
336 | */ | ||
337 | static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left, | ||
338 | efi_memory_desc_t *right) | ||
339 | { | ||
340 | static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT | | ||
341 | EFI_MEMORY_WC | EFI_MEMORY_UC | | ||
342 | EFI_MEMORY_RUNTIME; | ||
343 | |||
344 | return ((left->attribute ^ right->attribute) & mem_type_mask) == 0; | ||
345 | } | ||
346 | |||
308 | /* | 347 | /* |
309 | * efi_get_virtmap() - create a virtual mapping for the EFI memory map | 348 | * efi_get_virtmap() - create a virtual mapping for the EFI memory map |
310 | * | 349 | * |
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, | |||
317 | int *count) | 356 | int *count) |
318 | { | 357 | { |
319 | u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; | 358 | u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; |
320 | efi_memory_desc_t *out = runtime_map; | 359 | efi_memory_desc_t *in, *prev = NULL, *out = runtime_map; |
321 | int l; | 360 | int l; |
322 | 361 | ||
323 | for (l = 0; l < map_size; l += desc_size) { | 362 | /* |
324 | efi_memory_desc_t *in = (void *)memory_map + l; | 363 | * To work around potential issues with the Properties Table feature |
364 | * introduced in UEFI 2.5, which may split PE/COFF executable images | ||
365 | * in memory into several RuntimeServicesCode and RuntimeServicesData | ||
366 | * regions, we need to preserve the relative offsets between adjacent | ||
367 | * EFI_MEMORY_RUNTIME regions with the same memory type attributes. | ||
368 | * The easiest way to find adjacent regions is to sort the memory map | ||
369 | * before traversing it. | ||
370 | */ | ||
371 | sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); | ||
372 | |||
373 | for (l = 0; l < map_size; l += desc_size, prev = in) { | ||
325 | u64 paddr, size; | 374 | u64 paddr, size; |
326 | 375 | ||
376 | in = (void *)memory_map + l; | ||
327 | if (!(in->attribute & EFI_MEMORY_RUNTIME)) | 377 | if (!(in->attribute & EFI_MEMORY_RUNTIME)) |
328 | continue; | 378 | continue; |
329 | 379 | ||
380 | paddr = in->phys_addr; | ||
381 | size = in->num_pages * EFI_PAGE_SIZE; | ||
382 | |||
330 | /* | 383 | /* |
331 | * Make the mapping compatible with 64k pages: this allows | 384 | * Make the mapping compatible with 64k pages: this allows |
332 | * a 4k page size kernel to kexec a 64k page size kernel and | 385 | * a 4k page size kernel to kexec a 64k page size kernel and |
333 | * vice versa. | 386 | * vice versa. |
334 | */ | 387 | */ |
335 | paddr = round_down(in->phys_addr, SZ_64K); | 388 | if (!regions_are_adjacent(prev, in) || |
336 | size = round_up(in->num_pages * EFI_PAGE_SIZE + | 389 | !regions_have_compatible_memory_type_attrs(prev, in)) { |
337 | in->phys_addr - paddr, SZ_64K); | 390 | |
338 | 391 | paddr = round_down(in->phys_addr, SZ_64K); | |
339 | /* | 392 | size += in->phys_addr - paddr; |
340 | * Avoid wasting memory on PTEs by choosing a virtual base that | 393 | |
341 | * is compatible with section mappings if this region has the | 394 | /* |
342 | * appropriate size and physical alignment. (Sections are 2 MB | 395 | * Avoid wasting memory on PTEs by choosing a virtual |
343 | * on 4k granule kernels) | 396 | * base that is compatible with section mappings if this |
344 | */ | 397 | * region has the appropriate size and physical |
345 | if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) | 398 | * alignment. (Sections are 2 MB on 4k granule kernels) |
346 | efi_virt_base = round_up(efi_virt_base, SZ_2M); | 399 | */ |
400 | if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) | ||
401 | efi_virt_base = round_up(efi_virt_base, SZ_2M); | ||
402 | else | ||
403 | efi_virt_base = round_up(efi_virt_base, SZ_64K); | ||
404 | } | ||
347 | 405 | ||
348 | in->virt_addr = efi_virt_base + in->phys_addr - paddr; | 406 | in->virt_addr = efi_virt_base + in->phys_addr - paddr; |
349 | efi_virt_base += size; | 407 | efi_virt_base += size; |
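The arm-stub change sorts the EFI memory map by physical address before assigning virtual addresses, so RuntimeServicesCode/Data regions that UEFI 2.5's Properties Table feature may have split out of one PE/COFF image keep their relative offsets; the virtual base is only realigned when a runtime region is not adjacent to, or not attribute-compatible with, its predecessor. The sort() call is the standard kernel helper: array base, element count, element size, a comparator and an optional swap callback (NULL here). A small userspace analogue of the sort-then-check-adjacency step:

    #include <stdio.h>
    #include <stdlib.h>

    struct region { unsigned long long phys; unsigned long long pages; };

    static int cmp_region(const void *l, const void *r)
    {
        const struct region *a = l, *b = r;
        return (a->phys > b->phys) ? 1 : -1;
    }

    int main(void)
    {
        struct region map[] = { { 0x8000, 1 }, { 0x4000, 2 }, { 0x6000, 1 } };

        qsort(map, 3, sizeof(map[0]), cmp_region);

        /* Adjacency test mirrors regions_are_adjacent(): previous end == next start. */
        for (int i = 1; i < 3; i++) {
            unsigned long long prev_end = map[i - 1].phys + map[i - 1].pages * 0x1000;
            printf("0x%llx..0x%llx %s\n", map[i - 1].phys, prev_end,
                   prev_end == map[i].phys ? "adjacent to next" : "gap before next");
        }
        return 0;
    }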
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index e334a01cf92f..6b6548fda089 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h | |||
@@ -5,10 +5,6 @@ | |||
5 | /* error code which can't be mistaken for valid address */ | 5 | /* error code which can't be mistaken for valid address */ |
6 | #define EFI_ERROR (~0UL) | 6 | #define EFI_ERROR (~0UL) |
7 | 7 | ||
8 | #undef memcpy | ||
9 | #undef memset | ||
10 | #undef memmove | ||
11 | |||
12 | void efi_char16_printk(efi_system_table_t *, efi_char16_t *); | 8 | void efi_char16_printk(efi_system_table_t *, efi_char16_t *); |
13 | 9 | ||
14 | efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image, | 10 | efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image, |
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c new file mode 100644 index 000000000000..bb6555f6d63b --- /dev/null +++ b/drivers/firmware/qcom_scm-64.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* Copyright (c) 2015, The Linux Foundation. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/io.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/qcom_scm.h> | ||
16 | |||
17 | /** | ||
18 | * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus | ||
19 | * @entry: Entry point function for the cpus | ||
20 | * @cpus: The cpumask of cpus that will use the entry point | ||
21 | * | ||
22 | * Set the cold boot address of the cpus. Any cpu outside the supported | ||
23 | * range would be removed from the cpu present mask. | ||
24 | */ | ||
25 | int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) | ||
26 | { | ||
27 | return -ENOTSUPP; | ||
28 | } | ||
29 | |||
30 | /** | ||
31 | * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus | ||
32 | * @entry: Entry point function for the cpus | ||
33 | * @cpus: The cpumask of cpus that will use the entry point | ||
34 | * | ||
35 | * Set the Linux entry point for the SCM to transfer control to when coming | ||
36 | * out of a power down. CPU power down may be executed on cpuidle or hotplug. | ||
37 | */ | ||
38 | int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) | ||
39 | { | ||
40 | return -ENOTSUPP; | ||
41 | } | ||
42 | |||
43 | /** | ||
44 | * qcom_scm_cpu_power_down() - Power down the cpu | ||
45 | * @flags - Flags to flush cache | ||
46 | * | ||
47 | * This is an end point to power down cpu. If there was a pending interrupt, | ||
48 | * the control would return from this function, otherwise, the cpu jumps to the | ||
49 | * warm boot entry point set for this cpu upon reset. | ||
50 | */ | ||
51 | void __qcom_scm_cpu_power_down(u32 flags) | ||
52 | { | ||
53 | } | ||
54 | |||
55 | int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) | ||
56 | { | ||
57 | return -ENOTSUPP; | ||
58 | } | ||
59 | |||
60 | int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) | ||
61 | { | ||
62 | return -ENOTSUPP; | ||
63 | } | ||
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index b4fc9e4d24c6..8949b3f6f74d 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -356,7 +356,7 @@ config GPIO_PXA | |||
356 | 356 | ||
357 | config GPIO_RCAR | 357 | config GPIO_RCAR |
358 | tristate "Renesas R-Car GPIO" | 358 | tristate "Renesas R-Car GPIO" |
359 | depends on ARM && (ARCH_SHMOBILE || COMPILE_TEST) | 359 | depends on ARCH_SHMOBILE || COMPILE_TEST |
360 | select GPIOLIB_IRQCHIP | 360 | select GPIOLIB_IRQCHIP |
361 | help | 361 | help |
362 | Say yes here to support GPIO on Renesas R-Car SoCs. | 362 | Say yes here to support GPIO on Renesas R-Car SoCs. |
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 9b7e0b3db387..1b44941574fa 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c | |||
@@ -201,8 +201,7 @@ static int altera_gpio_direction_output(struct gpio_chip *gc, | |||
201 | return 0; | 201 | return 0; |
202 | } | 202 | } |
203 | 203 | ||
204 | static void altera_gpio_irq_edge_handler(unsigned int irq, | 204 | static void altera_gpio_irq_edge_handler(struct irq_desc *desc) |
205 | struct irq_desc *desc) | ||
206 | { | 205 | { |
207 | struct altera_gpio_chip *altera_gc; | 206 | struct altera_gpio_chip *altera_gc; |
208 | struct irq_chip *chip; | 207 | struct irq_chip *chip; |
@@ -231,8 +230,7 @@ static void altera_gpio_irq_edge_handler(unsigned int irq, | |||
231 | } | 230 | } |
232 | 231 | ||
233 | 232 | ||
234 | static void altera_gpio_irq_leveL_high_handler(unsigned int irq, | 233 | static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) |
235 | struct irq_desc *desc) | ||
236 | { | 234 | { |
237 | struct altera_gpio_chip *altera_gc; | 235 | struct altera_gpio_chip *altera_gc; |
238 | struct irq_chip *chip; | 236 | struct irq_chip *chip; |
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 31b90ac15204..33a1f9779b86 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c | |||
@@ -433,7 +433,7 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type) | |||
433 | return 0; | 433 | return 0; |
434 | } | 434 | } |
435 | 435 | ||
436 | static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 436 | static void bcm_kona_gpio_irq_handler(struct irq_desc *desc) |
437 | { | 437 | { |
438 | void __iomem *reg_base; | 438 | void __iomem *reg_base; |
439 | int bit, bank_id; | 439 | int bit, bank_id; |
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index 9ea86d2ac054..4c64627c6bb5 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c | |||
@@ -236,7 +236,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank) | |||
236 | } | 236 | } |
237 | 237 | ||
238 | /* Each UPG GIO block has one IRQ for all banks */ | 238 | /* Each UPG GIO block has one IRQ for all banks */ |
239 | static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 239 | static void brcmstb_gpio_irq_handler(struct irq_desc *desc) |
240 | { | 240 | { |
241 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 241 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
242 | struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc); | 242 | struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc); |
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 94b0ab709721..5e715388803d 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c | |||
@@ -326,8 +326,7 @@ static struct irq_chip gpio_irqchip = { | |||
326 | .flags = IRQCHIP_SET_TYPE_MASKED, | 326 | .flags = IRQCHIP_SET_TYPE_MASKED, |
327 | }; | 327 | }; |
328 | 328 | ||
329 | static void | 329 | static void gpio_irq_handler(struct irq_desc *desc) |
330 | gpio_irq_handler(unsigned __irq, struct irq_desc *desc) | ||
331 | { | 330 | { |
332 | unsigned int irq = irq_desc_get_irq(desc); | 331 | unsigned int irq = irq_desc_get_irq(desc); |
333 | struct davinci_gpio_regs __iomem *g; | 332 | struct davinci_gpio_regs __iomem *g; |
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index c5be4b9b8baf..fcd5b0acfc72 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c | |||
@@ -147,7 +147,7 @@ static u32 dwapb_do_irq(struct dwapb_gpio *gpio) | |||
147 | return ret; | 147 | return ret; |
148 | } | 148 | } |
149 | 149 | ||
150 | static void dwapb_irq_handler(u32 irq, struct irq_desc *desc) | 150 | static void dwapb_irq_handler(struct irq_desc *desc) |
151 | { | 151 | { |
152 | struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc); | 152 | struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc); |
153 | struct irq_chip *chip = irq_desc_get_chip(desc); | 153 | struct irq_chip *chip = irq_desc_get_chip(desc); |
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c index 9d90366ea259..3e3947b35c83 100644 --- a/drivers/gpio/gpio-ep93xx.c +++ b/drivers/gpio/gpio-ep93xx.c | |||
@@ -78,7 +78,7 @@ static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable) | |||
78 | EP93XX_GPIO_REG(int_debounce_register_offset[port])); | 78 | EP93XX_GPIO_REG(int_debounce_register_offset[port])); |
79 | } | 79 | } |
80 | 80 | ||
81 | static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) | 81 | static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc) |
82 | { | 82 | { |
83 | unsigned char status; | 83 | unsigned char status; |
84 | int i; | 84 | int i; |
@@ -100,8 +100,7 @@ static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
100 | } | 100 | } |
101 | } | 101 | } |
102 | 102 | ||
103 | static void ep93xx_gpio_f_irq_handler(unsigned int __irq, | 103 | static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc) |
104 | struct irq_desc *desc) | ||
105 | { | 104 | { |
106 | /* | 105 | /* |
107 | * map discontiguous hw irq range to continuous sw irq range: | 106 | * map discontiguous hw irq range to continuous sw irq range: |
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c index aa28c65eb6b4..70097472b02c 100644 --- a/drivers/gpio/gpio-intel-mid.c +++ b/drivers/gpio/gpio-intel-mid.c | |||
@@ -301,7 +301,7 @@ static const struct pci_device_id intel_gpio_ids[] = { | |||
301 | }; | 301 | }; |
302 | MODULE_DEVICE_TABLE(pci, intel_gpio_ids); | 302 | MODULE_DEVICE_TABLE(pci, intel_gpio_ids); |
303 | 303 | ||
304 | static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc) | 304 | static void intel_mid_irq_handler(struct irq_desc *desc) |
305 | { | 305 | { |
306 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 306 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
307 | struct intel_mid_gpio *priv = to_intel_gpio_priv(gc); | 307 | struct intel_mid_gpio *priv = to_intel_gpio_priv(gc); |
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index 153af464c7a7..127c37b380ae 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c | |||
@@ -234,7 +234,7 @@ static int lp_gpio_direction_output(struct gpio_chip *chip, | |||
234 | return 0; | 234 | return 0; |
235 | } | 235 | } |
236 | 236 | ||
237 | static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc) | 237 | static void lp_gpio_irq_handler(struct irq_desc *desc) |
238 | { | 238 | { |
239 | struct irq_data *data = irq_desc_get_irq_data(desc); | 239 | struct irq_data *data = irq_desc_get_irq_data(desc); |
240 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 240 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 8ef7a12de983..48ef368347ab 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c | |||
@@ -194,7 +194,7 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) | |||
194 | return -ENXIO; | 194 | return -ENXIO; |
195 | } | 195 | } |
196 | 196 | ||
197 | static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) | 197 | static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc) |
198 | { | 198 | { |
199 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); | 199 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); |
200 | struct irq_chip *chip = irq_desc_get_chip(desc); | 200 | struct irq_chip *chip = irq_desc_get_chip(desc); |
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c index 7bcfb87a5fa6..22523aae8abe 100644 --- a/drivers/gpio/gpio-msic.c +++ b/drivers/gpio/gpio-msic.c | |||
@@ -232,7 +232,7 @@ static struct irq_chip msic_irqchip = { | |||
232 | .irq_bus_sync_unlock = msic_bus_sync_unlock, | 232 | .irq_bus_sync_unlock = msic_bus_sync_unlock, |
233 | }; | 233 | }; |
234 | 234 | ||
235 | static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc) | 235 | static void msic_gpio_irq_handler(struct irq_desc *desc) |
236 | { | 236 | { |
237 | struct irq_data *data = irq_desc_get_irq_data(desc); | 237 | struct irq_data *data = irq_desc_get_irq_data(desc); |
238 | struct msic_gpio *mg = irq_data_get_irq_handler_data(data); | 238 | struct msic_gpio *mg = irq_data_get_irq_handler_data(data); |
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c index d2012cfb5571..4b4222145f10 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c | |||
@@ -305,7 +305,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) | |||
305 | * which have been set as summary IRQ lines and which are triggered, | 305 | * which have been set as summary IRQ lines and which are triggered, |
306 | * and to call their interrupt handlers. | 306 | * and to call their interrupt handlers. |
307 | */ | 307 | */ |
308 | static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) | 308 | static void msm_summary_irq_handler(struct irq_desc *desc) |
309 | { | 309 | { |
310 | unsigned long i; | 310 | unsigned long i; |
311 | struct irq_chip *chip = irq_desc_get_chip(desc); | 311 | struct irq_chip *chip = irq_desc_get_chip(desc); |
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index b396bf3bf294..df418b81456d 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c | |||
@@ -458,7 +458,7 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type) | |||
458 | return 0; | 458 | return 0; |
459 | } | 459 | } |
460 | 460 | ||
461 | static void mvebu_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) | 461 | static void mvebu_gpio_irq_handler(struct irq_desc *desc) |
462 | { | 462 | { |
463 | struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc); | 463 | struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc); |
464 | struct irq_chip *chip = irq_desc_get_chip(desc); | 464 | struct irq_chip *chip = irq_desc_get_chip(desc); |
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index b752b560126e..b8dd847443c5 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c | |||
@@ -272,7 +272,7 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) | |||
272 | } | 272 | } |
273 | 273 | ||
274 | /* MX1 and MX3 has one interrupt *per* gpio port */ | 274 | /* MX1 and MX3 has one interrupt *per* gpio port */ |
275 | static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) | 275 | static void mx3_gpio_irq_handler(struct irq_desc *desc) |
276 | { | 276 | { |
277 | u32 irq_stat; | 277 | u32 irq_stat; |
278 | struct mxc_gpio_port *port = irq_desc_get_handler_data(desc); | 278 | struct mxc_gpio_port *port = irq_desc_get_handler_data(desc); |
@@ -288,7 +288,7 @@ static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) | |||
288 | } | 288 | } |
289 | 289 | ||
290 | /* MX2 has one interrupt *for all* gpio ports */ | 290 | /* MX2 has one interrupt *for all* gpio ports */ |
291 | static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) | 291 | static void mx2_gpio_irq_handler(struct irq_desc *desc) |
292 | { | 292 | { |
293 | u32 irq_msk, irq_stat; | 293 | u32 irq_msk, irq_stat; |
294 | struct mxc_gpio_port *port; | 294 | struct mxc_gpio_port *port; |
@@ -339,13 +339,15 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable) | |||
339 | return 0; | 339 | return 0; |
340 | } | 340 | } |
341 | 341 | ||
342 | static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base) | 342 | static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base) |
343 | { | 343 | { |
344 | struct irq_chip_generic *gc; | 344 | struct irq_chip_generic *gc; |
345 | struct irq_chip_type *ct; | 345 | struct irq_chip_type *ct; |
346 | 346 | ||
347 | gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base, | 347 | gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base, |
348 | port->base, handle_level_irq); | 348 | port->base, handle_level_irq); |
349 | if (!gc) | ||
350 | return -ENOMEM; | ||
349 | gc->private = port; | 351 | gc->private = port; |
350 | 352 | ||
351 | ct = gc->chip_types; | 353 | ct = gc->chip_types; |
@@ -360,6 +362,8 @@ static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base) | |||
360 | 362 | ||
361 | irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, | 363 | irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, |
362 | IRQ_NOREQUEST, 0); | 364 | IRQ_NOREQUEST, 0); |
365 | |||
366 | return 0; | ||
363 | } | 367 | } |
364 | 368 | ||
365 | static void mxc_gpio_get_hw(struct platform_device *pdev) | 369 | static void mxc_gpio_get_hw(struct platform_device *pdev) |
@@ -477,12 +481,16 @@ static int mxc_gpio_probe(struct platform_device *pdev) | |||
477 | } | 481 | } |
478 | 482 | ||
479 | /* gpio-mxc can be a generic irq chip */ | 483 | /* gpio-mxc can be a generic irq chip */ |
480 | mxc_gpio_init_gc(port, irq_base); | 484 | err = mxc_gpio_init_gc(port, irq_base); |
485 | if (err < 0) | ||
486 | goto out_irqdomain_remove; | ||
481 | 487 | ||
482 | list_add_tail(&port->node, &mxc_gpio_ports); | 488 | list_add_tail(&port->node, &mxc_gpio_ports); |
483 | 489 | ||
484 | return 0; | 490 | return 0; |
485 | 491 | ||
492 | out_irqdomain_remove: | ||
493 | irq_domain_remove(port->domain); | ||
486 | out_irqdesc_free: | 494 | out_irqdesc_free: |
487 | irq_free_descs(irq_base, 32); | 495 | irq_free_descs(irq_base, 32); |
488 | out_gpiochip_remove: | 496 | out_gpiochip_remove: |
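The gpio-mxc change lets an irq_alloc_generic_chip() failure propagate: the init helper now returns -ENOMEM instead of dereferencing a NULL chip, and the probe path gained an unwind label that removes the IRQ domain created just before. Probe error paths generally unwind in reverse order of setup; the same shape in miniature, with hypothetical helpers:

    static int example_probe(struct example_port *port)
    {
        int err;

        err = setup_domain(port);       /* step 1 */
        if (err)
            return err;

        err = setup_irqchip(port);      /* step 2, may fail with -ENOMEM */
        if (err)
            goto out_domain_remove;     /* undo step 1 only */

        return 0;

    out_domain_remove:
        teardown_domain(port);
        return err;
    }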
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index b7f383eb18d9..a4288f428819 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c | |||
@@ -154,7 +154,7 @@ static void mxs_flip_edge(struct mxs_gpio_port *port, u32 gpio) | |||
154 | } | 154 | } |
155 | 155 | ||
156 | /* MXS has one interrupt *per* gpio port */ | 156 | /* MXS has one interrupt *per* gpio port */ |
157 | static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) | 157 | static void mxs_gpio_irq_handler(struct irq_desc *desc) |
158 | { | 158 | { |
159 | u32 irq_stat; | 159 | u32 irq_stat; |
160 | struct mxs_gpio_port *port = irq_desc_get_handler_data(desc); | 160 | struct mxs_gpio_port *port = irq_desc_get_handler_data(desc); |
@@ -196,13 +196,16 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable) | |||
196 | return 0; | 196 | return 0; |
197 | } | 197 | } |
198 | 198 | ||
199 | static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) | 199 | static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) |
200 | { | 200 | { |
201 | struct irq_chip_generic *gc; | 201 | struct irq_chip_generic *gc; |
202 | struct irq_chip_type *ct; | 202 | struct irq_chip_type *ct; |
203 | 203 | ||
204 | gc = irq_alloc_generic_chip("gpio-mxs", 1, irq_base, | 204 | gc = irq_alloc_generic_chip("gpio-mxs", 1, irq_base, |
205 | port->base, handle_level_irq); | 205 | port->base, handle_level_irq); |
206 | if (!gc) | ||
207 | return -ENOMEM; | ||
208 | |||
206 | gc->private = port; | 209 | gc->private = port; |
207 | 210 | ||
208 | ct = gc->chip_types; | 211 | ct = gc->chip_types; |
@@ -216,6 +219,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) | |||
216 | 219 | ||
217 | irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, | 220 | irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, |
218 | IRQ_NOREQUEST, 0); | 221 | IRQ_NOREQUEST, 0); |
222 | |||
223 | return 0; | ||
219 | } | 224 | } |
220 | 225 | ||
221 | static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset) | 226 | static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset) |
@@ -317,7 +322,9 @@ static int mxs_gpio_probe(struct platform_device *pdev) | |||
317 | } | 322 | } |
318 | 323 | ||
319 | /* gpio-mxs can be a generic irq chip */ | 324 | /* gpio-mxs can be a generic irq chip */ |
320 | mxs_gpio_init_gc(port, irq_base); | 325 | err = mxs_gpio_init_gc(port, irq_base); |
326 | if (err < 0) | ||
327 | goto out_irqdomain_remove; | ||
321 | 328 | ||
322 | /* setup one handler for each entry */ | 329 | /* setup one handler for each entry */ |
323 | irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler, | 330 | irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler, |
@@ -343,6 +350,8 @@ static int mxs_gpio_probe(struct platform_device *pdev) | |||
343 | 350 | ||
344 | out_bgpio_remove: | 351 | out_bgpio_remove: |
345 | bgpio_remove(&port->bgc); | 352 | bgpio_remove(&port->bgc); |
353 | out_irqdomain_remove: | ||
354 | irq_domain_remove(port->domain); | ||
346 | out_irqdesc_free: | 355 | out_irqdesc_free: |
347 | irq_free_descs(irq_base, 32); | 356 | irq_free_descs(irq_base, 32); |
348 | return err; | 357 | return err; |
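From here on, most of the GPIO hunks (gpio-mxs above, then pl061, pxa, sa1100, tegra, timberdale, tz1090, vf610, zx and zynq below) are the same mechanical conversion: chained flow handlers drop their unused irq argument and take only the irq_desc, generic_handle_irq_desc() likewise loses the irq number, and __irq_set_handler_locked(d->irq, ...) becomes irq_set_handler_locked(d, ...). A hedged sketch of the resulting handler shape; irq_desc_get_handler_data() and irq_desc_get_chip() appear in the hunks themselves, while irq_desc_get_irq() and the chained_irq_enter()/chained_irq_exit() bracketing are assumptions for drivers that still need the Linux irq number or a chip ack/eoi:

	static void foo_gpio_irq_handler(struct irq_desc *desc)
	{
		struct foo_gpio_port *port = irq_desc_get_handler_data(desc);
		struct irq_chip *chip = irq_desc_get_chip(desc);
		unsigned int irq = irq_desc_get_irq(desc);	/* only if still needed */

		chained_irq_enter(chip, desc);
		/* demux port->base status bits, calling generic_handle_irq() per set bit */
		chained_irq_exit(chip, desc);
	}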
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 2ae0d47e9554..5236db161e76 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
@@ -709,7 +709,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) | |||
709 | * line's interrupt handler has been run, we may miss some nested | 709 | * line's interrupt handler has been run, we may miss some nested |
710 | * interrupts. | 710 | * interrupts. |
711 | */ | 711 | */ |
712 | static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 712 | static void omap_gpio_irq_handler(struct irq_desc *desc) |
713 | { | 713 | { |
714 | void __iomem *isr_reg = NULL; | 714 | void __iomem *isr_reg = NULL; |
715 | u32 isr; | 715 | u32 isr; |
@@ -1098,7 +1098,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) | |||
1098 | } else { | 1098 | } else { |
1099 | bank->chip.label = "gpio"; | 1099 | bank->chip.label = "gpio"; |
1100 | bank->chip.base = gpio; | 1100 | bank->chip.base = gpio; |
1101 | gpio += bank->width; | ||
1102 | } | 1101 | } |
1103 | bank->chip.ngpio = bank->width; | 1102 | bank->chip.ngpio = bank->width; |
1104 | 1103 | ||
@@ -1108,6 +1107,9 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) | |||
1108 | return ret; | 1107 | return ret; |
1109 | } | 1108 | } |
1110 | 1109 | ||
1110 | if (!bank->is_mpuio) | ||
1111 | gpio += bank->width; | ||
1112 | |||
1111 | #ifdef CONFIG_ARCH_OMAP1 | 1113 | #ifdef CONFIG_ARCH_OMAP1 |
1112 | /* | 1114 | /* |
1113 | * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop | 1115 | * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop |
@@ -1253,8 +1255,11 @@ static int omap_gpio_probe(struct platform_device *pdev) | |||
1253 | omap_gpio_mod_init(bank); | 1255 | omap_gpio_mod_init(bank); |
1254 | 1256 | ||
1255 | ret = omap_gpio_chip_init(bank, irqc); | 1257 | ret = omap_gpio_chip_init(bank, irqc); |
1256 | if (ret) | 1258 | if (ret) { |
1259 | pm_runtime_put_sync(bank->dev); | ||
1260 | pm_runtime_disable(bank->dev); | ||
1257 | return ret; | 1261 | return ret; |
1262 | } | ||
1258 | 1263 | ||
1259 | omap_gpio_show_rev(bank); | 1264 | omap_gpio_show_rev(bank); |
1260 | 1265 | ||
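The gpio-omap changes above do two things: the legacy gpio numbering is advanced only after the chip has been registered successfully (and still only for non-MPUIO banks), and a failing omap_gpio_chip_init() no longer leaks the runtime-PM reference taken earlier in probe. The balancing pattern, sketched as a probe fragment on the assumption that pm_runtime_enable() and pm_runtime_get_sync() were called on bank->dev earlier in the function:

	ret = omap_gpio_chip_init(bank, irqc);
	if (ret) {
		pm_runtime_put_sync(bank->dev);	/* drop the reference from get_sync */
		pm_runtime_disable(bank->dev);	/* balance pm_runtime_enable */
		return ret;
	}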
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 04756130437f..229ef653e0f8 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c | |||
@@ -187,7 +187,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger) | |||
187 | return 0; | 187 | return 0; |
188 | } | 188 | } |
189 | 189 | ||
190 | static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) | 190 | static void pl061_irq_handler(struct irq_desc *desc) |
191 | { | 191 | { |
192 | unsigned long pending; | 192 | unsigned long pending; |
193 | int offset; | 193 | int offset; |
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 55a11de3d5b7..df2ce550f309 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c | |||
@@ -401,7 +401,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type) | |||
401 | return 0; | 401 | return 0; |
402 | } | 402 | } |
403 | 403 | ||
404 | static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc) | 404 | static void pxa_gpio_demux_handler(struct irq_desc *desc) |
405 | { | 405 | { |
406 | struct pxa_gpio_chip *c; | 406 | struct pxa_gpio_chip *c; |
407 | int loop, gpio, gpio_base, n; | 407 | int loop, gpio, gpio_base, n; |
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c index 67bd2f5d89e8..990fa9023e22 100644 --- a/drivers/gpio/gpio-sa1100.c +++ b/drivers/gpio/gpio-sa1100.c | |||
@@ -172,8 +172,7 @@ static struct irq_domain *sa1100_gpio_irqdomain; | |||
172 | * irq_controller_lock held, and IRQs disabled. Decode the IRQ | 172 | * irq_controller_lock held, and IRQs disabled. Decode the IRQ |
173 | * and call the handler. | 173 | * and call the handler. |
174 | */ | 174 | */ |
175 | static void | 175 | static void sa1100_gpio_handler(struct irq_desc *desc) |
176 | sa1100_gpio_handler(unsigned int __irq, struct irq_desc *desc) | ||
177 | { | 176 | { |
178 | unsigned int irq, mask; | 177 | unsigned int irq, mask; |
179 | 178 | ||
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c index 458d9d7952b8..9c6b96707c9f 100644 --- a/drivers/gpio/gpio-sx150x.c +++ b/drivers/gpio/gpio-sx150x.c | |||
@@ -706,4 +706,3 @@ module_exit(sx150x_exit); | |||
706 | MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>"); | 706 | MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>"); |
707 | MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders"); | 707 | MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders"); |
708 | MODULE_LICENSE("GPL v2"); | 708 | MODULE_LICENSE("GPL v2"); |
709 | MODULE_ALIAS("i2c:sx150x"); | ||
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 9b14aafb576d..027e5f47dd28 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c | |||
@@ -266,7 +266,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d) | |||
266 | gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio); | 266 | gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio); |
267 | } | 267 | } |
268 | 268 | ||
269 | static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 269 | static void tegra_gpio_irq_handler(struct irq_desc *desc) |
270 | { | 270 | { |
271 | int port; | 271 | int port; |
272 | int pin; | 272 | int pin; |
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index 5a492054589f..30653e6319e9 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c | |||
@@ -192,7 +192,7 @@ out: | |||
192 | return ret; | 192 | return ret; |
193 | } | 193 | } |
194 | 194 | ||
195 | static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) | 195 | static void timbgpio_irq(struct irq_desc *desc) |
196 | { | 196 | { |
197 | struct timbgpio *tgpio = irq_desc_get_handler_data(desc); | 197 | struct timbgpio *tgpio = irq_desc_get_handler_data(desc); |
198 | struct irq_data *data = irq_desc_get_irq_data(desc); | 198 | struct irq_data *data = irq_desc_get_irq_data(desc); |
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c index bbac92ae4c32..87bb1b1eee8d 100644 --- a/drivers/gpio/gpio-tz1090.c +++ b/drivers/gpio/gpio-tz1090.c | |||
@@ -375,7 +375,7 @@ static int gpio_set_irq_wake(struct irq_data *data, unsigned int on) | |||
375 | #define gpio_set_irq_wake NULL | 375 | #define gpio_set_irq_wake NULL |
376 | #endif | 376 | #endif |
377 | 377 | ||
378 | static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 378 | static void tz1090_gpio_irq_handler(struct irq_desc *desc) |
379 | { | 379 | { |
380 | irq_hw_number_t hw; | 380 | irq_hw_number_t hw; |
381 | unsigned int irq_stat, irq_no; | 381 | unsigned int irq_stat, irq_no; |
@@ -400,7 +400,7 @@ static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
400 | == IRQ_TYPE_EDGE_BOTH) | 400 | == IRQ_TYPE_EDGE_BOTH) |
401 | tz1090_gpio_irq_next_edge(bank, hw); | 401 | tz1090_gpio_irq_next_edge(bank, hw); |
402 | 402 | ||
403 | generic_handle_irq_desc(irq_no, child_desc); | 403 | generic_handle_irq_desc(child_desc); |
404 | } | 404 | } |
405 | } | 405 | } |
406 | 406 | ||
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 3d5714d4f405..069f9e4b7daa 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c | |||
@@ -120,7 +120,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, | |||
120 | return pinctrl_gpio_direction_output(chip->base + gpio); | 120 | return pinctrl_gpio_direction_output(chip->base + gpio); |
121 | } | 121 | } |
122 | 122 | ||
123 | static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc) | 123 | static void vf610_gpio_irq_handler(struct irq_desc *desc) |
124 | { | 124 | { |
125 | struct vf610_gpio_port *port = irq_desc_get_handler_data(desc); | 125 | struct vf610_gpio_port *port = irq_desc_get_handler_data(desc); |
126 | struct irq_chip *chip = irq_desc_get_chip(desc); | 126 | struct irq_chip *chip = irq_desc_get_chip(desc); |
@@ -176,9 +176,9 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type) | |||
176 | port->irqc[d->hwirq] = irqc; | 176 | port->irqc[d->hwirq] = irqc; |
177 | 177 | ||
178 | if (type & IRQ_TYPE_LEVEL_MASK) | 178 | if (type & IRQ_TYPE_LEVEL_MASK) |
179 | __irq_set_handler_locked(d->irq, handle_level_irq); | 179 | irq_set_handler_locked(d, handle_level_irq); |
180 | else | 180 | else |
181 | __irq_set_handler_locked(d->irq, handle_edge_irq); | 181 | irq_set_handler_locked(d, handle_edge_irq); |
182 | 182 | ||
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c index 12ee1969298c..4b8a26910705 100644 --- a/drivers/gpio/gpio-zx.c +++ b/drivers/gpio/gpio-zx.c | |||
@@ -177,7 +177,7 @@ static int zx_irq_type(struct irq_data *d, unsigned trigger) | |||
177 | return 0; | 177 | return 0; |
178 | } | 178 | } |
179 | 179 | ||
180 | static void zx_irq_handler(unsigned irq, struct irq_desc *desc) | 180 | static void zx_irq_handler(struct irq_desc *desc) |
181 | { | 181 | { |
182 | unsigned long pending; | 182 | unsigned long pending; |
183 | int offset; | 183 | int offset; |
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 27348e7cb705..1d1a5865ede9 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c | |||
@@ -514,7 +514,7 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio, | |||
514 | * application for that pin. | 514 | * application for that pin. |
515 | * Note: A bug is reported if no handler is set for the gpio pin. | 515 | * Note: A bug is reported if no handler is set for the gpio pin. |
516 | */ | 516 | */ |
517 | static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc) | 517 | static void zynq_gpio_irqhandler(struct irq_desc *desc) |
518 | { | 518 | { |
519 | u32 int_sts, int_enb; | 519 | u32 int_sts, int_enb; |
520 | unsigned int bank_num; | 520 | unsigned int bank_num; |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 980c1f87866a..5db3445552b1 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -1174,15 +1174,16 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low); | |||
1174 | * that the GPIO was actually requested. | 1174 | * that the GPIO was actually requested. |
1175 | */ | 1175 | */ |
1176 | 1176 | ||
1177 | static bool _gpiod_get_raw_value(const struct gpio_desc *desc) | 1177 | static int _gpiod_get_raw_value(const struct gpio_desc *desc) |
1178 | { | 1178 | { |
1179 | struct gpio_chip *chip; | 1179 | struct gpio_chip *chip; |
1180 | bool value; | ||
1181 | int offset; | 1180 | int offset; |
1181 | int value; | ||
1182 | 1182 | ||
1183 | chip = desc->chip; | 1183 | chip = desc->chip; |
1184 | offset = gpio_chip_hwgpio(desc); | 1184 | offset = gpio_chip_hwgpio(desc); |
1185 | value = chip->get ? chip->get(chip, offset) : false; | 1185 | value = chip->get ? chip->get(chip, offset) : -EIO; |
1186 | value = value < 0 ? value : !!value; | ||
1186 | trace_gpio_value(desc_to_gpio(desc), 1, value); | 1187 | trace_gpio_value(desc_to_gpio(desc), 1, value); |
1187 | return value; | 1188 | return value; |
1188 | } | 1189 | } |
@@ -1192,7 +1193,7 @@ static bool _gpiod_get_raw_value(const struct gpio_desc *desc) | |||
1192 | * @desc: gpio whose value will be returned | 1193 | * @desc: gpio whose value will be returned |
1193 | * | 1194 | * |
1194 | * Return the GPIO's raw value, i.e. the value of the physical line disregarding | 1195 | * Return the GPIO's raw value, i.e. the value of the physical line disregarding |
1195 | * its ACTIVE_LOW status. | 1196 | * its ACTIVE_LOW status, or negative errno on failure. |
1196 | * | 1197 | * |
1197 | * This function should be called from contexts where we cannot sleep, and will | 1198 | * This function should be called from contexts where we cannot sleep, and will |
1198 | * complain if the GPIO chip functions potentially sleep. | 1199 | * complain if the GPIO chip functions potentially sleep. |
@@ -1212,7 +1213,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value); | |||
1212 | * @desc: gpio whose value will be returned | 1213 | * @desc: gpio whose value will be returned |
1213 | * | 1214 | * |
1214 | * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into | 1215 | * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into |
1215 | * account. | 1216 | * account, or negative errno on failure. |
1216 | * | 1217 | * |
1217 | * This function should be called from contexts where we cannot sleep, and will | 1218 | * This function should be called from contexts where we cannot sleep, and will |
1218 | * complain if the GPIO chip functions potentially sleep. | 1219 | * complain if the GPIO chip functions potentially sleep. |
@@ -1226,6 +1227,9 @@ int gpiod_get_value(const struct gpio_desc *desc) | |||
1226 | WARN_ON(desc->chip->can_sleep); | 1227 | WARN_ON(desc->chip->can_sleep); |
1227 | 1228 | ||
1228 | value = _gpiod_get_raw_value(desc); | 1229 | value = _gpiod_get_raw_value(desc); |
1230 | if (value < 0) | ||
1231 | return value; | ||
1232 | |||
1229 | if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) | 1233 | if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) |
1230 | value = !value; | 1234 | value = !value; |
1231 | 1235 | ||
@@ -1548,7 +1552,7 @@ EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq); | |||
1548 | * @desc: gpio whose value will be returned | 1552 | * @desc: gpio whose value will be returned |
1549 | * | 1553 | * |
1550 | * Return the GPIO's raw value, i.e. the value of the physical line disregarding | 1554 | * Return the GPIO's raw value, i.e. the value of the physical line disregarding |
1551 | * its ACTIVE_LOW status. | 1555 | * its ACTIVE_LOW status, or negative errno on failure. |
1552 | * | 1556 | * |
1553 | * This function is to be called from contexts that can sleep. | 1557 | * This function is to be called from contexts that can sleep. |
1554 | */ | 1558 | */ |
@@ -1566,7 +1570,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep); | |||
1566 | * @desc: gpio whose value will be returned | 1570 | * @desc: gpio whose value will be returned |
1567 | * | 1571 | * |
1568 | * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into | 1572 | * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into |
1569 | * account. | 1573 | * account, or negative errno on failure. |
1570 | * | 1574 | * |
1571 | * This function is to be called from contexts that can sleep. | 1575 | * This function is to be called from contexts that can sleep. |
1572 | */ | 1576 | */ |
@@ -1579,6 +1583,9 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc) | |||
1579 | return 0; | 1583 | return 0; |
1580 | 1584 | ||
1581 | value = _gpiod_get_raw_value(desc); | 1585 | value = _gpiod_get_raw_value(desc); |
1586 | if (value < 0) | ||
1587 | return value; | ||
1588 | |||
1582 | if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) | 1589 | if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) |
1583 | value = !value; | 1590 | value = !value; |
1584 | 1591 | ||
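With _gpiod_get_raw_value() returning int, a chip without a ->get() callback now yields -EIO and any negative value from the driver is passed straight through; gpiod_get_value() and gpiod_get_value_cansleep() check for it before applying the ACTIVE_LOW inversion, and their kerneldoc now advertises the negative errno. Callers should therefore treat the result as tri-state. A minimal consumer-side sketch (desc and dev are hypothetical, obtained elsewhere):

	int val = gpiod_get_value(desc);

	if (val < 0) {
		dev_err(dev, "failed to read GPIO: %d\n", val);
		return val;		/* e.g. -EIO: chip provides no ->get() hook */
	}
	/* val is 0 or 1, ACTIVE_LOW already accounted for */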
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 668939a14206..6647fb26ef25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size; | |||
82 | extern int amdgpu_enable_scheduler; | 82 | extern int amdgpu_enable_scheduler; |
83 | extern int amdgpu_sched_jobs; | 83 | extern int amdgpu_sched_jobs; |
84 | extern int amdgpu_sched_hw_submission; | 84 | extern int amdgpu_sched_hw_submission; |
85 | extern int amdgpu_enable_semaphores; | ||
85 | 86 | ||
86 | #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 | 87 | #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 |
87 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ | 88 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ |
@@ -432,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev); | |||
432 | void amdgpu_fence_driver_fini(struct amdgpu_device *adev); | 433 | void amdgpu_fence_driver_fini(struct amdgpu_device *adev); |
433 | void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); | 434 | void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); |
434 | 435 | ||
435 | void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); | 436 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); |
436 | int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | 437 | int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, |
437 | struct amdgpu_irq_src *irq_src, | 438 | struct amdgpu_irq_src *irq_src, |
438 | unsigned irq_type); | 439 | unsigned irq_type); |
@@ -890,7 +891,7 @@ struct amdgpu_ring { | |||
890 | struct amdgpu_device *adev; | 891 | struct amdgpu_device *adev; |
891 | const struct amdgpu_ring_funcs *funcs; | 892 | const struct amdgpu_ring_funcs *funcs; |
892 | struct amdgpu_fence_driver fence_drv; | 893 | struct amdgpu_fence_driver fence_drv; |
893 | struct amd_gpu_scheduler *scheduler; | 894 | struct amd_gpu_scheduler sched; |
894 | 895 | ||
895 | spinlock_t fence_lock; | 896 | spinlock_t fence_lock; |
896 | struct mutex *ring_lock; | 897 | struct mutex *ring_lock; |
@@ -1201,8 +1202,6 @@ struct amdgpu_gfx { | |||
1201 | struct amdgpu_irq_src priv_inst_irq; | 1202 | struct amdgpu_irq_src priv_inst_irq; |
1202 | /* gfx status */ | 1203 | /* gfx status */ |
1203 | uint32_t gfx_current_status; | 1204 | uint32_t gfx_current_status; |
1204 | /* sync signal for const engine */ | ||
1205 | unsigned ce_sync_offs; | ||
1206 | /* ce ram size*/ | 1205 | /* ce ram size*/ |
1207 | unsigned ce_ram_size; | 1206 | unsigned ce_ram_size; |
1208 | }; | 1207 | }; |
@@ -1274,8 +1273,10 @@ struct amdgpu_job { | |||
1274 | uint32_t num_ibs; | 1273 | uint32_t num_ibs; |
1275 | struct mutex job_lock; | 1274 | struct mutex job_lock; |
1276 | struct amdgpu_user_fence uf; | 1275 | struct amdgpu_user_fence uf; |
1277 | int (*free_job)(struct amdgpu_job *sched_job); | 1276 | int (*free_job)(struct amdgpu_job *job); |
1278 | }; | 1277 | }; |
1278 | #define to_amdgpu_job(sched_job) \ | ||
1279 | container_of((sched_job), struct amdgpu_job, base) | ||
1279 | 1280 | ||
1280 | static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) | 1281 | static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) |
1281 | { | 1282 | { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 496ed2192eba..84d68d658f8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | |||
@@ -183,7 +183,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, | |||
183 | return -ENOMEM; | 183 | return -ENOMEM; |
184 | 184 | ||
185 | r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, | 185 | r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, |
186 | AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo); | 186 | AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo); |
187 | if (r) { | 187 | if (r) { |
188 | dev_err(rdev->dev, | 188 | dev_err(rdev->dev, |
189 | "failed to allocate BO for amdkfd (%d)\n", r); | 189 | "failed to allocate BO for amdkfd (%d)\n", r); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 77f1d7c6ea3a..9416e0f5c1db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | |||
@@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) | |||
672 | /* disp clock */ | 672 | /* disp clock */ |
673 | adev->clock.default_dispclk = | 673 | adev->clock.default_dispclk = |
674 | le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); | 674 | le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); |
675 | if (adev->clock.default_dispclk == 0) | 675 | /* set a reasonable default for DP */ |
676 | adev->clock.default_dispclk = 54000; /* 540 Mhz */ | 676 | if (adev->clock.default_dispclk < 53900) { |
677 | DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", | ||
678 | adev->clock.default_dispclk / 100); | ||
679 | adev->clock.default_dispclk = 60000; | ||
680 | } | ||
677 | adev->clock.dp_extclk = | 681 | adev->clock.dp_extclk = |
678 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); | 682 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); |
679 | adev->clock.current_dispclk = adev->clock.default_dispclk; | 683 | adev->clock.current_dispclk = adev->clock.default_dispclk; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 98d59ee640ce..cd639c362df3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | |||
@@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, | |||
79 | int time; | 79 | int time; |
80 | 80 | ||
81 | n = AMDGPU_BENCHMARK_ITERATIONS; | 81 | n = AMDGPU_BENCHMARK_ITERATIONS; |
82 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj); | 82 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, |
83 | NULL, &sobj); | ||
83 | if (r) { | 84 | if (r) { |
84 | goto out_cleanup; | 85 | goto out_cleanup; |
85 | } | 86 | } |
@@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, | |||
91 | if (r) { | 92 | if (r) { |
92 | goto out_cleanup; | 93 | goto out_cleanup; |
93 | } | 94 | } |
94 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj); | 95 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, |
96 | NULL, &dobj); | ||
95 | if (r) { | 97 | if (r) { |
96 | goto out_cleanup; | 98 | goto out_cleanup; |
97 | } | 99 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 6b1243f9f86d..8e995148f56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
@@ -86,7 +86,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem, | |||
86 | 86 | ||
87 | struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages); | 87 | struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages); |
88 | ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false, | 88 | ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false, |
89 | AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); | 89 | AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo); |
90 | if (ret) | 90 | if (ret) |
91 | return ret; | 91 | return ret; |
92 | ret = amdgpu_bo_reserve(bo, false); | 92 | ret = amdgpu_bo_reserve(bo, false); |
@@ -197,7 +197,8 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, | |||
197 | 197 | ||
198 | ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, | 198 | ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, |
199 | true, domain, flags, | 199 | true, domain, flags, |
200 | NULL, &placement, &obj); | 200 | NULL, &placement, NULL, |
201 | &obj); | ||
201 | if (ret) { | 202 | if (ret) { |
202 | DRM_ERROR("(%d) bo create failed\n", ret); | 203 | DRM_ERROR("(%d) bo create failed\n", ret); |
203 | return ret; | 204 | return ret; |
@@ -207,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, | |||
207 | return ret; | 208 | return ret; |
208 | } | 209 | } |
209 | 210 | ||
210 | static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd, | ||
211 | cgs_handle_t *handle) | ||
212 | { | ||
213 | CGS_FUNC_ADEV; | ||
214 | int r; | ||
215 | uint32_t dma_handle; | ||
216 | struct drm_gem_object *obj; | ||
217 | struct amdgpu_bo *bo; | ||
218 | struct drm_device *dev = adev->ddev; | ||
219 | struct drm_file *file_priv = NULL, *priv; | ||
220 | |||
221 | mutex_lock(&dev->struct_mutex); | ||
222 | list_for_each_entry(priv, &dev->filelist, lhead) { | ||
223 | rcu_read_lock(); | ||
224 | if (priv->pid == get_pid(task_pid(current))) | ||
225 | file_priv = priv; | ||
226 | rcu_read_unlock(); | ||
227 | if (file_priv) | ||
228 | break; | ||
229 | } | ||
230 | mutex_unlock(&dev->struct_mutex); | ||
231 | r = dev->driver->prime_fd_to_handle(dev, | ||
232 | file_priv, dmabuf_fd, | ||
233 | &dma_handle); | ||
234 | spin_lock(&file_priv->table_lock); | ||
235 | |||
236 | /* Check if we currently have a reference on the object */ | ||
237 | obj = idr_find(&file_priv->object_idr, dma_handle); | ||
238 | if (obj == NULL) { | ||
239 | spin_unlock(&file_priv->table_lock); | ||
240 | return -EINVAL; | ||
241 | } | ||
242 | spin_unlock(&file_priv->table_lock); | ||
243 | bo = gem_to_amdgpu_bo(obj); | ||
244 | *handle = (cgs_handle_t)bo; | ||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) | 211 | static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) |
249 | { | 212 | { |
250 | struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; | 213 | struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; |
@@ -809,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = { | |||
809 | }; | 772 | }; |
810 | 773 | ||
811 | static const struct cgs_os_ops amdgpu_cgs_os_ops = { | 774 | static const struct cgs_os_ops amdgpu_cgs_os_ops = { |
812 | amdgpu_cgs_import_gpu_mem, | ||
813 | amdgpu_cgs_add_irq_source, | 775 | amdgpu_cgs_add_irq_source, |
814 | amdgpu_cgs_irq_get, | 776 | amdgpu_cgs_irq_get, |
815 | amdgpu_cgs_irq_put | 777 | amdgpu_cgs_irq_put |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 3b355aeb62fd..fd16652aa277 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -154,42 +154,42 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
154 | { | 154 | { |
155 | union drm_amdgpu_cs *cs = data; | 155 | union drm_amdgpu_cs *cs = data; |
156 | uint64_t *chunk_array_user; | 156 | uint64_t *chunk_array_user; |
157 | uint64_t *chunk_array = NULL; | 157 | uint64_t *chunk_array; |
158 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | 158 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
159 | unsigned size, i; | 159 | unsigned size; |
160 | int r = 0; | 160 | int i; |
161 | int ret; | ||
161 | 162 | ||
162 | if (!cs->in.num_chunks) | 163 | if (cs->in.num_chunks == 0) |
163 | goto out; | 164 | return 0; |
165 | |||
166 | chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); | ||
167 | if (!chunk_array) | ||
168 | return -ENOMEM; | ||
164 | 169 | ||
165 | p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); | 170 | p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); |
166 | if (!p->ctx) { | 171 | if (!p->ctx) { |
167 | r = -EINVAL; | 172 | ret = -EINVAL; |
168 | goto out; | 173 | goto free_chunk; |
169 | } | 174 | } |
175 | |||
170 | p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); | 176 | p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); |
171 | 177 | ||
172 | /* get chunks */ | 178 | /* get chunks */ |
173 | INIT_LIST_HEAD(&p->validated); | 179 | INIT_LIST_HEAD(&p->validated); |
174 | chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); | 180 | chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks); |
175 | if (chunk_array == NULL) { | ||
176 | r = -ENOMEM; | ||
177 | goto out; | ||
178 | } | ||
179 | |||
180 | chunk_array_user = (uint64_t __user *)(cs->in.chunks); | ||
181 | if (copy_from_user(chunk_array, chunk_array_user, | 181 | if (copy_from_user(chunk_array, chunk_array_user, |
182 | sizeof(uint64_t)*cs->in.num_chunks)) { | 182 | sizeof(uint64_t)*cs->in.num_chunks)) { |
183 | r = -EFAULT; | 183 | ret = -EFAULT; |
184 | goto out; | 184 | goto put_bo_list; |
185 | } | 185 | } |
186 | 186 | ||
187 | p->nchunks = cs->in.num_chunks; | 187 | p->nchunks = cs->in.num_chunks; |
188 | p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), | 188 | p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), |
189 | GFP_KERNEL); | 189 | GFP_KERNEL); |
190 | if (p->chunks == NULL) { | 190 | if (!p->chunks) { |
191 | r = -ENOMEM; | 191 | ret = -ENOMEM; |
192 | goto out; | 192 | goto put_bo_list; |
193 | } | 193 | } |
194 | 194 | ||
195 | for (i = 0; i < p->nchunks; i++) { | 195 | for (i = 0; i < p->nchunks; i++) { |
@@ -197,28 +197,30 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
197 | struct drm_amdgpu_cs_chunk user_chunk; | 197 | struct drm_amdgpu_cs_chunk user_chunk; |
198 | uint32_t __user *cdata; | 198 | uint32_t __user *cdata; |
199 | 199 | ||
200 | chunk_ptr = (void __user *)chunk_array[i]; | 200 | chunk_ptr = (void __user *)(unsigned long)chunk_array[i]; |
201 | if (copy_from_user(&user_chunk, chunk_ptr, | 201 | if (copy_from_user(&user_chunk, chunk_ptr, |
202 | sizeof(struct drm_amdgpu_cs_chunk))) { | 202 | sizeof(struct drm_amdgpu_cs_chunk))) { |
203 | r = -EFAULT; | 203 | ret = -EFAULT; |
204 | goto out; | 204 | i--; |
205 | goto free_partial_kdata; | ||
205 | } | 206 | } |
206 | p->chunks[i].chunk_id = user_chunk.chunk_id; | 207 | p->chunks[i].chunk_id = user_chunk.chunk_id; |
207 | p->chunks[i].length_dw = user_chunk.length_dw; | 208 | p->chunks[i].length_dw = user_chunk.length_dw; |
208 | 209 | ||
209 | size = p->chunks[i].length_dw; | 210 | size = p->chunks[i].length_dw; |
210 | cdata = (void __user *)user_chunk.chunk_data; | 211 | cdata = (void __user *)(unsigned long)user_chunk.chunk_data; |
211 | p->chunks[i].user_ptr = cdata; | 212 | p->chunks[i].user_ptr = cdata; |
212 | 213 | ||
213 | p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); | 214 | p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); |
214 | if (p->chunks[i].kdata == NULL) { | 215 | if (p->chunks[i].kdata == NULL) { |
215 | r = -ENOMEM; | 216 | ret = -ENOMEM; |
216 | goto out; | 217 | i--; |
218 | goto free_partial_kdata; | ||
217 | } | 219 | } |
218 | size *= sizeof(uint32_t); | 220 | size *= sizeof(uint32_t); |
219 | if (copy_from_user(p->chunks[i].kdata, cdata, size)) { | 221 | if (copy_from_user(p->chunks[i].kdata, cdata, size)) { |
220 | r = -EFAULT; | 222 | ret = -EFAULT; |
221 | goto out; | 223 | goto free_partial_kdata; |
222 | } | 224 | } |
223 | 225 | ||
224 | switch (p->chunks[i].chunk_id) { | 226 | switch (p->chunks[i].chunk_id) { |
@@ -238,15 +240,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
238 | gobj = drm_gem_object_lookup(p->adev->ddev, | 240 | gobj = drm_gem_object_lookup(p->adev->ddev, |
239 | p->filp, handle); | 241 | p->filp, handle); |
240 | if (gobj == NULL) { | 242 | if (gobj == NULL) { |
241 | r = -EINVAL; | 243 | ret = -EINVAL; |
242 | goto out; | 244 | goto free_partial_kdata; |
243 | } | 245 | } |
244 | 246 | ||
245 | p->uf.bo = gem_to_amdgpu_bo(gobj); | 247 | p->uf.bo = gem_to_amdgpu_bo(gobj); |
246 | p->uf.offset = fence_data->offset; | 248 | p->uf.offset = fence_data->offset; |
247 | } else { | 249 | } else { |
248 | r = -EINVAL; | 250 | ret = -EINVAL; |
249 | goto out; | 251 | goto free_partial_kdata; |
250 | } | 252 | } |
251 | break; | 253 | break; |
252 | 254 | ||
@@ -254,19 +256,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
254 | break; | 256 | break; |
255 | 257 | ||
256 | default: | 258 | default: |
257 | r = -EINVAL; | 259 | ret = -EINVAL; |
258 | goto out; | 260 | goto free_partial_kdata; |
259 | } | 261 | } |
260 | } | 262 | } |
261 | 263 | ||
262 | 264 | ||
263 | p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); | 265 | p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); |
264 | if (!p->ibs) | 266 | if (!p->ibs) { |
265 | r = -ENOMEM; | 267 | ret = -ENOMEM; |
268 | goto free_all_kdata; | ||
269 | } | ||
266 | 270 | ||
267 | out: | ||
268 | kfree(chunk_array); | 271 | kfree(chunk_array); |
269 | return r; | 272 | return 0; |
273 | |||
274 | free_all_kdata: | ||
275 | i = p->nchunks - 1; | ||
276 | free_partial_kdata: | ||
277 | for (; i >= 0; i--) | ||
278 | drm_free_large(p->chunks[i].kdata); | ||
279 | kfree(p->chunks); | ||
280 | put_bo_list: | ||
281 | if (p->bo_list) | ||
282 | amdgpu_bo_list_put(p->bo_list); | ||
283 | amdgpu_ctx_put(p->ctx); | ||
284 | free_chunk: | ||
285 | kfree(chunk_array); | ||
286 | |||
287 | return ret; | ||
270 | } | 288 | } |
271 | 289 | ||
272 | /* Returns how many bytes TTM can move per IB. | 290 | /* Returns how many bytes TTM can move per IB. |
@@ -321,25 +339,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) | |||
321 | return max(bytes_moved_threshold, 1024*1024ull); | 339 | return max(bytes_moved_threshold, 1024*1024ull); |
322 | } | 340 | } |
323 | 341 | ||
324 | int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) | 342 | int amdgpu_cs_list_validate(struct amdgpu_device *adev, |
343 | struct amdgpu_vm *vm, | ||
344 | struct list_head *validated) | ||
325 | { | 345 | { |
326 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | ||
327 | struct amdgpu_vm *vm = &fpriv->vm; | ||
328 | struct amdgpu_device *adev = p->adev; | ||
329 | struct amdgpu_bo_list_entry *lobj; | 346 | struct amdgpu_bo_list_entry *lobj; |
330 | struct list_head duplicates; | ||
331 | struct amdgpu_bo *bo; | 347 | struct amdgpu_bo *bo; |
332 | u64 bytes_moved = 0, initial_bytes_moved; | 348 | u64 bytes_moved = 0, initial_bytes_moved; |
333 | u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); | 349 | u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); |
334 | int r; | 350 | int r; |
335 | 351 | ||
336 | INIT_LIST_HEAD(&duplicates); | 352 | list_for_each_entry(lobj, validated, tv.head) { |
337 | r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); | ||
338 | if (unlikely(r != 0)) { | ||
339 | return r; | ||
340 | } | ||
341 | |||
342 | list_for_each_entry(lobj, &p->validated, tv.head) { | ||
343 | bo = lobj->robj; | 353 | bo = lobj->robj; |
344 | if (!bo->pin_count) { | 354 | if (!bo->pin_count) { |
345 | u32 domain = lobj->prefered_domains; | 355 | u32 domain = lobj->prefered_domains; |
@@ -373,7 +383,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) | |||
373 | domain = lobj->allowed_domains; | 383 | domain = lobj->allowed_domains; |
374 | goto retry; | 384 | goto retry; |
375 | } | 385 | } |
376 | ttm_eu_backoff_reservation(&p->ticket, &p->validated); | ||
377 | return r; | 386 | return r; |
378 | } | 387 | } |
379 | } | 388 | } |
@@ -386,6 +395,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) | |||
386 | { | 395 | { |
387 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | 396 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
388 | struct amdgpu_cs_buckets buckets; | 397 | struct amdgpu_cs_buckets buckets; |
398 | struct list_head duplicates; | ||
389 | bool need_mmap_lock = false; | 399 | bool need_mmap_lock = false; |
390 | int i, r; | 400 | int i, r; |
391 | 401 | ||
@@ -405,8 +415,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) | |||
405 | if (need_mmap_lock) | 415 | if (need_mmap_lock) |
406 | down_read(¤t->mm->mmap_sem); | 416 | down_read(¤t->mm->mmap_sem); |
407 | 417 | ||
408 | r = amdgpu_cs_list_validate(p); | 418 | INIT_LIST_HEAD(&duplicates); |
419 | r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); | ||
420 | if (unlikely(r != 0)) | ||
421 | goto error_reserve; | ||
422 | |||
423 | r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); | ||
424 | if (r) | ||
425 | goto error_validate; | ||
426 | |||
427 | r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); | ||
428 | |||
429 | error_validate: | ||
430 | if (r) | ||
431 | ttm_eu_backoff_reservation(&p->ticket, &p->validated); | ||
409 | 432 | ||
433 | error_reserve: | ||
410 | if (need_mmap_lock) | 434 | if (need_mmap_lock) |
411 | up_read(¤t->mm->mmap_sem); | 435 | up_read(¤t->mm->mmap_sem); |
412 | 436 | ||
@@ -772,15 +796,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, | |||
772 | return 0; | 796 | return 0; |
773 | } | 797 | } |
774 | 798 | ||
775 | static int amdgpu_cs_free_job(struct amdgpu_job *sched_job) | 799 | static int amdgpu_cs_free_job(struct amdgpu_job *job) |
776 | { | 800 | { |
777 | int i; | 801 | int i; |
778 | if (sched_job->ibs) | 802 | if (job->ibs) |
779 | for (i = 0; i < sched_job->num_ibs; i++) | 803 | for (i = 0; i < job->num_ibs; i++) |
780 | amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); | 804 | amdgpu_ib_free(job->adev, &job->ibs[i]); |
781 | kfree(sched_job->ibs); | 805 | kfree(job->ibs); |
782 | if (sched_job->uf.bo) | 806 | if (job->uf.bo) |
783 | drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base); | 807 | drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base); |
784 | return 0; | 808 | return 0; |
785 | } | 809 | } |
786 | 810 | ||
@@ -804,7 +828,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
804 | r = amdgpu_cs_parser_init(parser, data); | 828 | r = amdgpu_cs_parser_init(parser, data); |
805 | if (r) { | 829 | if (r) { |
806 | DRM_ERROR("Failed to initialize parser !\n"); | 830 | DRM_ERROR("Failed to initialize parser !\n"); |
807 | amdgpu_cs_parser_fini(parser, r, false); | 831 | kfree(parser); |
808 | up_read(&adev->exclusive_lock); | 832 | up_read(&adev->exclusive_lock); |
809 | r = amdgpu_cs_handle_lockup(adev, r); | 833 | r = amdgpu_cs_handle_lockup(adev, r); |
810 | return r; | 834 | return r; |
@@ -842,7 +866,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
842 | job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); | 866 | job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); |
843 | if (!job) | 867 | if (!job) |
844 | return -ENOMEM; | 868 | return -ENOMEM; |
845 | job->base.sched = ring->scheduler; | 869 | job->base.sched = &ring->sched; |
846 | job->base.s_entity = &parser->ctx->rings[ring->idx].entity; | 870 | job->base.s_entity = &parser->ctx->rings[ring->idx].entity; |
847 | job->adev = parser->adev; | 871 | job->adev = parser->adev; |
848 | job->ibs = parser->ibs; | 872 | job->ibs = parser->ibs; |
@@ -857,7 +881,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
857 | 881 | ||
858 | job->free_job = amdgpu_cs_free_job; | 882 | job->free_job = amdgpu_cs_free_job; |
859 | mutex_lock(&job->job_lock); | 883 | mutex_lock(&job->job_lock); |
860 | r = amd_sched_entity_push_job((struct amd_sched_job *)job); | 884 | r = amd_sched_entity_push_job(&job->base); |
861 | if (r) { | 885 | if (r) { |
862 | mutex_unlock(&job->job_lock); | 886 | mutex_unlock(&job->job_lock); |
863 | amdgpu_cs_free_job(job); | 887 | amdgpu_cs_free_job(job); |
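Two themes run through the amdgpu_cs_parser_init() rework above: u64 fields that carry user-space pointers are converted through unsigned long before the __user cast, and the single out: label is replaced by targeted unwind labels (free_all_kdata, free_partial_kdata, put_bo_list, free_chunk) so each failure frees exactly what has been set up so far. A compressed sketch of the unwind idiom, with the surrounding driver code elided:

	chunk_ptr = (void __user *)(unsigned long)chunk_array[i];	/* not a direct u64 cast */

	for (i = 0; i < p->nchunks; i++) {
		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		if (!p->chunks[i].kdata) {
			ret = -ENOMEM;
			i--;			/* slot i holds nothing to free */
			goto free_partial_kdata;
		}
	}
	return 0;

	free_all_kdata:
		i = p->nchunks - 1;
	free_partial_kdata:
		while (i >= 0)
			drm_free_large(p->chunks[i--].kdata);
		kfree(p->chunks);
		return ret;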
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 20cbc4eb5a6f..e0b80ccdfe8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, | |||
43 | for (i = 0; i < adev->num_rings; i++) { | 43 | for (i = 0; i < adev->num_rings; i++) { |
44 | struct amd_sched_rq *rq; | 44 | struct amd_sched_rq *rq; |
45 | if (kernel) | 45 | if (kernel) |
46 | rq = &adev->rings[i]->scheduler->kernel_rq; | 46 | rq = &adev->rings[i]->sched.kernel_rq; |
47 | else | 47 | else |
48 | rq = &adev->rings[i]->scheduler->sched_rq; | 48 | rq = &adev->rings[i]->sched.sched_rq; |
49 | r = amd_sched_entity_init(adev->rings[i]->scheduler, | 49 | r = amd_sched_entity_init(&adev->rings[i]->sched, |
50 | &ctx->rings[i].entity, | 50 | &ctx->rings[i].entity, |
51 | rq, amdgpu_sched_jobs); | 51 | rq, amdgpu_sched_jobs); |
52 | if (r) | 52 | if (r) |
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, | |||
55 | 55 | ||
56 | if (i < adev->num_rings) { | 56 | if (i < adev->num_rings) { |
57 | for (j = 0; j < i; j++) | 57 | for (j = 0; j < i; j++) |
58 | amd_sched_entity_fini(adev->rings[j]->scheduler, | 58 | amd_sched_entity_fini(&adev->rings[j]->sched, |
59 | &ctx->rings[j].entity); | 59 | &ctx->rings[j].entity); |
60 | kfree(ctx); | 60 | kfree(ctx); |
61 | return r; | 61 | return r; |
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) | |||
75 | 75 | ||
76 | if (amdgpu_enable_scheduler) { | 76 | if (amdgpu_enable_scheduler) { |
77 | for (i = 0; i < adev->num_rings; i++) | 77 | for (i = 0; i < adev->num_rings; i++) |
78 | amd_sched_entity_fini(adev->rings[i]->scheduler, | 78 | amd_sched_entity_fini(&adev->rings[i]->sched, |
79 | &ctx->rings[i].entity); | 79 | &ctx->rings[i].entity); |
80 | } | 80 | } |
81 | } | 81 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6ff6ae945794..6068d8207d10 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -246,7 +246,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) | |||
246 | r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, | 246 | r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, |
247 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, | 247 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, |
248 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 248 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
249 | NULL, &adev->vram_scratch.robj); | 249 | NULL, NULL, &adev->vram_scratch.robj); |
250 | if (r) { | 250 | if (r) { |
251 | return r; | 251 | return r; |
252 | } | 252 | } |
@@ -449,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) | |||
449 | 449 | ||
450 | if (adev->wb.wb_obj == NULL) { | 450 | if (adev->wb.wb_obj == NULL) { |
451 | r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, | 451 | r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, |
452 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj); | 452 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, |
453 | &adev->wb.wb_obj); | ||
453 | if (r) { | 454 | if (r) { |
454 | dev_warn(adev->dev, "(%d) create WB bo failed\n", r); | 455 | dev_warn(adev->dev, "(%d) create WB bo failed\n", r); |
455 | return r; | 456 | return r; |
@@ -1650,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) | |||
1650 | drm_kms_helper_poll_disable(dev); | 1651 | drm_kms_helper_poll_disable(dev); |
1651 | 1652 | ||
1652 | /* turn off display hw */ | 1653 | /* turn off display hw */ |
1654 | drm_modeset_lock_all(dev); | ||
1653 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1655 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1654 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | 1656 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); |
1655 | } | 1657 | } |
1658 | drm_modeset_unlock_all(dev); | ||
1656 | 1659 | ||
1657 | /* unpin the front buffers */ | 1660 | /* unpin the front buffers */ |
1658 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 1661 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
@@ -1747,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | |||
1747 | if (fbcon) { | 1750 | if (fbcon) { |
1748 | drm_helper_resume_force_mode(dev); | 1751 | drm_helper_resume_force_mode(dev); |
1749 | /* turn on display hw */ | 1752 | /* turn on display hw */ |
1753 | drm_modeset_lock_all(dev); | ||
1750 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1754 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1751 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | 1755 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); |
1752 | } | 1756 | } |
1757 | drm_modeset_unlock_all(dev); | ||
1753 | } | 1758 | } |
1754 | 1759 | ||
1755 | drm_kms_helper_poll_enable(dev); | 1760 | drm_kms_helper_poll_enable(dev); |
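The suspend/resume hunks above bracket the connector DPMS loops with drm_modeset_lock_all()/drm_modeset_unlock_all(), taking the mode-config locks the DPMS helper path relies on while the display hardware is switched off and back on. The bracketing pattern, as used above:

	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	drm_modeset_unlock_all(dev);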
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index e3d70772b531..dc29ed8145c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
@@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
85 | /* We borrow the event spin lock for protecting flip_status */ | 85 | /* We borrow the event spin lock for protecting flip_status */ |
86 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | 86 | spin_lock_irqsave(&crtc->dev->event_lock, flags); |
87 | 87 | ||
88 | /* set the proper interrupt */ | ||
89 | amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id); | ||
90 | /* do the flip (mmio) */ | 88 | /* do the flip (mmio) */ |
91 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); | 89 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); |
92 | /* set the flip status */ | 90 | /* set the flip status */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0fcc0bd1622c..b190c2a83680 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
@@ -79,6 +79,7 @@ int amdgpu_exp_hw_support = 0; | |||
79 | int amdgpu_enable_scheduler = 0; | 79 | int amdgpu_enable_scheduler = 0; |
80 | int amdgpu_sched_jobs = 16; | 80 | int amdgpu_sched_jobs = 16; |
81 | int amdgpu_sched_hw_submission = 2; | 81 | int amdgpu_sched_hw_submission = 2; |
82 | int amdgpu_enable_semaphores = 1; | ||
82 | 83 | ||
83 | MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); | 84 | MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); |
84 | module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); | 85 | module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); |
@@ -152,6 +153,9 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); | |||
152 | MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); | 153 | MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); |
153 | module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); | 154 | module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); |
154 | 155 | ||
156 | MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)"); | ||
157 | module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644); | ||
158 | |||
155 | static struct pci_device_id pciidlist[] = { | 159 | static struct pci_device_id pciidlist[] = { |
156 | #ifdef CONFIG_DRM_AMDGPU_CIK | 160 | #ifdef CONFIG_DRM_AMDGPU_CIK |
157 | /* Kaveri */ | 161 | /* Kaveri */ |
@@ -238,11 +242,11 @@ static struct pci_device_id pciidlist[] = { | |||
238 | {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, | 242 | {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, |
239 | #endif | 243 | #endif |
240 | /* topaz */ | 244 | /* topaz */ |
241 | {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, | 245 | {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, |
242 | {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, | 246 | {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, |
243 | {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, | 247 | {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, |
244 | {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, | 248 | {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, |
245 | {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, | 249 | {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, |
246 | /* tonga */ | 250 | /* tonga */ |
247 | {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, | 251 | {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, |
248 | {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, | 252 | {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 8a122b1b7786..96290d9cddca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | |||
@@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) | |||
402 | return true; | 402 | return true; |
403 | return false; | 403 | return false; |
404 | } | 404 | } |
405 | |||
406 | void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) | ||
407 | { | ||
408 | struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev; | ||
409 | struct drm_fb_helper *fb_helper; | ||
410 | int ret; | ||
411 | |||
412 | if (!afbdev) | ||
413 | return; | ||
414 | |||
415 | fb_helper = &afbdev->helper; | ||
416 | |||
417 | ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); | ||
418 | if (ret) | ||
419 | DRM_DEBUG("failed to restore crtc mode\n"); | ||
420 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 1be2bd6d07ea..b3fc26c59787 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
@@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | |||
609 | * Init the fence driver for the requested ring (all asics). | 609 | * Init the fence driver for the requested ring (all asics). |
610 | * Helper function for amdgpu_fence_driver_init(). | 610 | * Helper function for amdgpu_fence_driver_init(). |
611 | */ | 611 | */ |
612 | void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) | 612 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) |
613 | { | 613 | { |
614 | int i; | 614 | int i, r; |
615 | 615 | ||
616 | ring->fence_drv.cpu_addr = NULL; | 616 | ring->fence_drv.cpu_addr = NULL; |
617 | ring->fence_drv.gpu_addr = 0; | 617 | ring->fence_drv.gpu_addr = 0; |
@@ -625,15 +625,19 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) | |||
625 | amdgpu_fence_check_lockup); | 625 | amdgpu_fence_check_lockup); |
626 | ring->fence_drv.ring = ring; | 626 | ring->fence_drv.ring = ring; |
627 | 627 | ||
628 | init_waitqueue_head(&ring->fence_drv.fence_queue); | ||
629 | |||
628 | if (amdgpu_enable_scheduler) { | 630 | if (amdgpu_enable_scheduler) { |
629 | ring->scheduler = amd_sched_create(&amdgpu_sched_ops, | 631 | r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, |
630 | ring->idx, | 632 | amdgpu_sched_hw_submission, ring->name); |
631 | amdgpu_sched_hw_submission, | 633 | if (r) { |
632 | (void *)ring->adev); | 634 | DRM_ERROR("Failed to create scheduler on ring %s.\n", |
633 | if (!ring->scheduler) | 635 | ring->name); |
634 | DRM_ERROR("Failed to create scheduler on ring %d.\n", | 636 | return r; |
635 | ring->idx); | 637 | } |
636 | } | 638 | } |
639 | |||
640 | return 0; | ||
637 | } | 641 | } |
638 | 642 | ||
639 | /** | 643 | /** |
@@ -681,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | |||
681 | wake_up_all(&ring->fence_drv.fence_queue); | 685 | wake_up_all(&ring->fence_drv.fence_queue); |
682 | amdgpu_irq_put(adev, ring->fence_drv.irq_src, | 686 | amdgpu_irq_put(adev, ring->fence_drv.irq_src, |
683 | ring->fence_drv.irq_type); | 687 | ring->fence_drv.irq_type); |
684 | if (ring->scheduler) | 688 | amd_sched_fini(&ring->sched); |
685 | amd_sched_destroy(ring->scheduler); | ||
686 | ring->fence_drv.initialized = false; | 689 | ring->fence_drv.initialized = false; |
687 | } | 690 | } |
688 | mutex_unlock(&adev->ring_lock); | 691 | mutex_unlock(&adev->ring_lock); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index cbd3a486c5c2..7312d729d300 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | |||
@@ -127,7 +127,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) | |||
127 | r = amdgpu_bo_create(adev, adev->gart.table_size, | 127 | r = amdgpu_bo_create(adev, adev->gart.table_size, |
128 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, | 128 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, |
129 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 129 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
130 | NULL, &adev->gart.robj); | 130 | NULL, NULL, &adev->gart.robj); |
131 | if (r) { | 131 | if (r) { |
132 | return r; | 132 | return r; |
133 | } | 133 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 5839fab374bf..7297ca3a0ba7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, | |||
69 | } | 69 | } |
70 | } | 70 | } |
71 | retry: | 71 | retry: |
72 | r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj); | 72 | r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, |
73 | flags, NULL, NULL, &robj); | ||
73 | if (r) { | 74 | if (r) { |
74 | if (r != -ERESTARTSYS) { | 75 | if (r != -ERESTARTSYS) { |
75 | if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { | 76 | if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { |
@@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, | |||
426 | &args->data.data_size_bytes, | 427 | &args->data.data_size_bytes, |
427 | &args->data.flags); | 428 | &args->data.flags); |
428 | } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { | 429 | } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { |
430 | if (args->data.data_size_bytes > sizeof(args->data.data)) { | ||
431 | r = -EINVAL; | ||
432 | goto unreserve; | ||
433 | } | ||
429 | r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); | 434 | r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); |
430 | if (!r) | 435 | if (!r) |
431 | r = amdgpu_bo_set_metadata(robj, args->data.data, | 436 | r = amdgpu_bo_set_metadata(robj, args->data.data, |
@@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, | |||
433 | args->data.flags); | 438 | args->data.flags); |
434 | } | 439 | } |
435 | 440 | ||
441 | unreserve: | ||
436 | amdgpu_bo_unreserve(robj); | 442 | amdgpu_bo_unreserve(robj); |
437 | out: | 443 | out: |
438 | drm_gem_object_unreference_unlocked(gobj); | 444 | drm_gem_object_unreference_unlocked(gobj); |
@@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
454 | struct ttm_validate_buffer tv, *entry; | 460 | struct ttm_validate_buffer tv, *entry; |
455 | struct amdgpu_bo_list_entry *vm_bos; | 461 | struct amdgpu_bo_list_entry *vm_bos; |
456 | struct ww_acquire_ctx ticket; | 462 | struct ww_acquire_ctx ticket; |
457 | struct list_head list; | 463 | struct list_head list, duplicates; |
458 | unsigned domain; | 464 | unsigned domain; |
459 | int r; | 465 | int r; |
460 | 466 | ||
461 | INIT_LIST_HEAD(&list); | 467 | INIT_LIST_HEAD(&list); |
468 | INIT_LIST_HEAD(&duplicates); | ||
462 | 469 | ||
463 | tv.bo = &bo_va->bo->tbo; | 470 | tv.bo = &bo_va->bo->tbo; |
464 | tv.shared = true; | 471 | tv.shared = true; |
@@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
468 | if (!vm_bos) | 475 | if (!vm_bos) |
469 | return; | 476 | return; |
470 | 477 | ||
471 | r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); | 478 | /* Provide duplicates to avoid -EALREADY */ |
479 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); | ||
472 | if (r) | 480 | if (r) |
473 | goto error_free; | 481 | goto error_free; |
474 | 482 | ||
@@ -651,7 +659,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, | |||
651 | int r; | 659 | int r; |
652 | 660 | ||
653 | args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); | 661 | args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); |
654 | args->size = args->pitch * args->height; | 662 | args->size = (u64)args->pitch * args->height; |
655 | args->size = ALIGN(args->size, PAGE_SIZE); | 663 | args->size = ALIGN(args->size, PAGE_SIZE); |
656 | 664 | ||
657 | r = amdgpu_gem_object_create(adev, args->size, 0, | 665 | r = amdgpu_gem_object_create(adev, args->size, 0, |
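The SET_METADATA hunk above rejects a user-supplied data_size_bytes that is larger than the in-struct buffer before anything is copied. A minimal userspace sketch of that validate-then-copy pattern; the struct and function names here are hypothetical, not the ioctl ABI:

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct metadata_args {
	uint32_t data_size_bytes;	/* caller-controlled length */
	uint8_t  data[64];		/* hypothetical fixed-size payload */
};

/* Copy caller-provided metadata only after checking it fits the destination. */
static int set_metadata(uint8_t *dst, size_t dst_size,
			const struct metadata_args *args)
{
	if (args->data_size_bytes > dst_size)
		return -EINVAL;		/* reject instead of overrunning dst */

	memcpy(dst, args->data, args->data_size_bytes);
	return 0;
}

int main(void)
{
	struct metadata_args args = { .data_size_bytes = 16 };
	uint8_t buf[64];

	return set_metadata(buf, sizeof(buf), &args) ? 1 : 0;
}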
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 5c8a803acedc..534fc04e80fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | |||
@@ -43,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev) | |||
43 | r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, | 43 | r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, |
44 | PAGE_SIZE, true, | 44 | PAGE_SIZE, true, |
45 | AMDGPU_GEM_DOMAIN_GTT, 0, | 45 | AMDGPU_GEM_DOMAIN_GTT, 0, |
46 | NULL, &adev->irq.ih.ring_obj); | 46 | NULL, NULL, &adev->irq.ih.ring_obj); |
47 | if (r) { | 47 | if (r) { |
48 | DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); | 48 | DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); |
49 | return r; | 49 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 0aba8e9bc8a0..7c42ff670080 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | |||
@@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev) | |||
140 | */ | 140 | */ |
141 | int amdgpu_irq_postinstall(struct drm_device *dev) | 141 | int amdgpu_irq_postinstall(struct drm_device *dev) |
142 | { | 142 | { |
143 | dev->max_vblank_count = 0x001fffff; | 143 | dev->max_vblank_count = 0x00ffffff; |
144 | return 0; | 144 | return 0; |
145 | } | 145 | } |
146 | 146 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 22367939ebf1..5d11e798230c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
@@ -390,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
390 | min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; | 390 | min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; |
391 | } | 391 | } |
392 | case AMDGPU_INFO_READ_MMR_REG: { | 392 | case AMDGPU_INFO_READ_MMR_REG: { |
393 | unsigned n, alloc_size = info->read_mmr_reg.count * 4; | 393 | unsigned n, alloc_size; |
394 | uint32_t *regs; | 394 | uint32_t *regs; |
395 | unsigned se_num = (info->read_mmr_reg.instance >> | 395 | unsigned se_num = (info->read_mmr_reg.instance >> |
396 | AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & | 396 | AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & |
@@ -406,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
406 | if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) | 406 | if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) |
407 | sh_num = 0xffffffff; | 407 | sh_num = 0xffffffff; |
408 | 408 | ||
409 | regs = kmalloc(alloc_size, GFP_KERNEL); | 409 | regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); |
410 | if (!regs) | 410 | if (!regs) |
411 | return -ENOMEM; | 411 | return -ENOMEM; |
412 | alloc_size = info->read_mmr_reg.count * sizeof(*regs); | ||
412 | 413 | ||
413 | for (i = 0; i < info->read_mmr_reg.count; i++) | 414 | for (i = 0; i < info->read_mmr_reg.count; i++) |
414 | if (amdgpu_asic_read_register(adev, se_num, sh_num, | 415 | if (amdgpu_asic_read_register(adev, se_num, sh_num, |
@@ -484,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
484 | * Outdated mess for old drm with Xorg being in charge (void function now). | 485 | * Outdated mess for old drm with Xorg being in charge (void function now). |
485 | */ | 486 | */ |
486 | /** | 487 | /** |
487 | * amdgpu_driver_firstopen_kms - drm callback for last close | 488 | * amdgpu_driver_lastclose_kms - drm callback for last close |
488 | * | 489 | * |
489 | * @dev: drm dev pointer | 490 | * @dev: drm dev pointer |
490 | * | 491 | * |
@@ -492,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
492 | */ | 493 | */ |
493 | void amdgpu_driver_lastclose_kms(struct drm_device *dev) | 494 | void amdgpu_driver_lastclose_kms(struct drm_device *dev) |
494 | { | 495 | { |
496 | struct amdgpu_device *adev = dev->dev_private; | ||
497 | |||
498 | amdgpu_fbdev_restore_mode(adev); | ||
495 | vga_switcheroo_process_delayed_switch(); | 499 | vga_switcheroo_process_delayed_switch(); |
496 | } | 500 | } |
497 | 501 | ||
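The AMDGPU_INFO_READ_MMR_REG hunk replaces an open-coded count * 4 allocation with kmalloc_array(), so a user-controlled count can no longer overflow the size calculation and shrink the buffer. A small userspace analogue of such an overflow-checked array allocation, assuming nothing beyond the C standard library:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Overflow-checked array allocation, loosely analogous to kmalloc_array(). */
static void *alloc_array(size_t count, size_t elem_size)
{
	if (elem_size && count > SIZE_MAX / elem_size)
		return NULL;		/* count * elem_size would wrap around */
	return malloc(count * elem_size);
}

int main(void)
{
	/* A hostile count now fails the allocation instead of wrapping. */
	uint32_t *regs = alloc_array(128, sizeof(*regs));

	if (!regs)
		return 1;
	memset(regs, 0, 128 * sizeof(*regs));
	free(regs);
	return 0;
}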
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 64efe5b52e65..7bd470d9ac30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | |||
@@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev); | |||
567 | void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state); | 567 | void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state); |
568 | int amdgpu_fbdev_total_size(struct amdgpu_device *adev); | 568 | int amdgpu_fbdev_total_size(struct amdgpu_device *adev); |
569 | bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj); | 569 | bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj); |
570 | void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev); | ||
570 | 571 | ||
571 | void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev); | 572 | void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev); |
572 | 573 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 08b09d55b96f..1a7708f365f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
215 | bool kernel, u32 domain, u64 flags, | 215 | bool kernel, u32 domain, u64 flags, |
216 | struct sg_table *sg, | 216 | struct sg_table *sg, |
217 | struct ttm_placement *placement, | 217 | struct ttm_placement *placement, |
218 | struct reservation_object *resv, | ||
218 | struct amdgpu_bo **bo_ptr) | 219 | struct amdgpu_bo **bo_ptr) |
219 | { | 220 | { |
220 | struct amdgpu_bo *bo; | 221 | struct amdgpu_bo *bo; |
@@ -261,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
261 | /* Kernel allocation are uninterruptible */ | 262 | /* Kernel allocation are uninterruptible */ |
262 | r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, | 263 | r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, |
263 | &bo->placement, page_align, !kernel, NULL, | 264 | &bo->placement, page_align, !kernel, NULL, |
264 | acc_size, sg, NULL, &amdgpu_ttm_bo_destroy); | 265 | acc_size, sg, resv, &amdgpu_ttm_bo_destroy); |
265 | if (unlikely(r != 0)) { | 266 | if (unlikely(r != 0)) { |
266 | return r; | 267 | return r; |
267 | } | 268 | } |
@@ -275,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
275 | int amdgpu_bo_create(struct amdgpu_device *adev, | 276 | int amdgpu_bo_create(struct amdgpu_device *adev, |
276 | unsigned long size, int byte_align, | 277 | unsigned long size, int byte_align, |
277 | bool kernel, u32 domain, u64 flags, | 278 | bool kernel, u32 domain, u64 flags, |
278 | struct sg_table *sg, struct amdgpu_bo **bo_ptr) | 279 | struct sg_table *sg, |
280 | struct reservation_object *resv, | ||
281 | struct amdgpu_bo **bo_ptr) | ||
279 | { | 282 | { |
280 | struct ttm_placement placement = {0}; | 283 | struct ttm_placement placement = {0}; |
281 | struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; | 284 | struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; |
@@ -286,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev, | |||
286 | amdgpu_ttm_placement_init(adev, &placement, | 289 | amdgpu_ttm_placement_init(adev, &placement, |
287 | placements, domain, flags); | 290 | placements, domain, flags); |
288 | 291 | ||
289 | return amdgpu_bo_create_restricted(adev, size, byte_align, | 292 | return amdgpu_bo_create_restricted(adev, size, byte_align, kernel, |
290 | kernel, domain, flags, | 293 | domain, flags, sg, &placement, |
291 | sg, | 294 | resv, bo_ptr); |
292 | &placement, | ||
293 | bo_ptr); | ||
294 | } | 295 | } |
295 | 296 | ||
296 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) | 297 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) |
@@ -535,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, | |||
535 | if (metadata == NULL) | 536 | if (metadata == NULL) |
536 | return -EINVAL; | 537 | return -EINVAL; |
537 | 538 | ||
538 | buffer = kzalloc(metadata_size, GFP_KERNEL); | 539 | buffer = kmemdup(metadata, metadata_size, GFP_KERNEL); |
539 | if (buffer == NULL) | 540 | if (buffer == NULL) |
540 | return -ENOMEM; | 541 | return -ENOMEM; |
541 | 542 | ||
542 | memcpy(buffer, metadata, metadata_size); | ||
543 | |||
544 | kfree(bo->metadata); | 543 | kfree(bo->metadata); |
545 | bo->metadata_flags = flags; | 544 | bo->metadata_flags = flags; |
546 | bo->metadata = buffer; | 545 | bo->metadata = buffer; |
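amdgpu_bo_set_metadata() now duplicates the caller's buffer with kmemdup() instead of kzalloc() followed by memcpy(). A userspace sketch of the same allocate-and-copy helper; memdup() below is a stand-in for illustration, not a kernel API:

#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for kmemdup(): allocate and copy in one step. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char metadata[] = "tiling=1,swizzle=0";	/* hypothetical payload */
	char *copy = memdup(metadata, sizeof(metadata));

	if (!copy)
		return 1;
	free(copy);
	return 0;
}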
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 6ea18dcec561..3c2ff4567798 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | |||
@@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev, | |||
129 | unsigned long size, int byte_align, | 129 | unsigned long size, int byte_align, |
130 | bool kernel, u32 domain, u64 flags, | 130 | bool kernel, u32 domain, u64 flags, |
131 | struct sg_table *sg, | 131 | struct sg_table *sg, |
132 | struct reservation_object *resv, | ||
132 | struct amdgpu_bo **bo_ptr); | 133 | struct amdgpu_bo **bo_ptr); |
133 | int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | 134 | int amdgpu_bo_create_restricted(struct amdgpu_device *adev, |
134 | unsigned long size, int byte_align, | 135 | unsigned long size, int byte_align, |
135 | bool kernel, u32 domain, u64 flags, | 136 | bool kernel, u32 domain, u64 flags, |
136 | struct sg_table *sg, | 137 | struct sg_table *sg, |
137 | struct ttm_placement *placement, | 138 | struct ttm_placement *placement, |
139 | struct reservation_object *resv, | ||
138 | struct amdgpu_bo **bo_ptr); | 140 | struct amdgpu_bo **bo_ptr); |
139 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); | 141 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); |
140 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo); | 142 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index d9652fe32d6a..59f735a933a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | |||
@@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, | |||
61 | struct dma_buf_attachment *attach, | 61 | struct dma_buf_attachment *attach, |
62 | struct sg_table *sg) | 62 | struct sg_table *sg) |
63 | { | 63 | { |
64 | struct reservation_object *resv = attach->dmabuf->resv; | ||
64 | struct amdgpu_device *adev = dev->dev_private; | 65 | struct amdgpu_device *adev = dev->dev_private; |
65 | struct amdgpu_bo *bo; | 66 | struct amdgpu_bo *bo; |
66 | int ret; | 67 | int ret; |
67 | 68 | ||
69 | ww_mutex_lock(&resv->lock, NULL); | ||
68 | ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, | 70 | ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, |
69 | AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); | 71 | AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo); |
72 | ww_mutex_unlock(&resv->lock); | ||
70 | if (ret) | 73 | if (ret) |
71 | return ERR_PTR(ret); | 74 | return ERR_PTR(ret); |
72 | 75 | ||
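The prime import path above locks the dma-buf's reservation object and passes it into amdgpu_bo_create(), so the imported BO shares the exporter's reservation object instead of getting its own. A rough userspace sketch of two objects sharing one lock, using pthreads purely for illustration and made-up struct names:

#include <pthread.h>
#include <stdio.h>

/* One reservation-like lock shared by the exporter and the importer. */
struct shared_resv {
	pthread_mutex_t lock;
};

struct buffer {
	struct shared_resv *resv;	/* not owned; points at the shared lock */
	int id;
};

static void buffer_init(struct buffer *bo, struct shared_resv *resv, int id)
{
	bo->resv = resv;
	bo->id = id;
}

int main(void)
{
	struct shared_resv resv;
	struct buffer exported, imported;

	pthread_mutex_init(&resv.lock, NULL);
	buffer_init(&exported, &resv, 0);

	/* Hold the exporter's lock while wiring up the imported object. */
	pthread_mutex_lock(&resv.lock);
	buffer_init(&imported, &resv, 1);
	pthread_mutex_unlock(&resv.lock);

	printf("share one reservation lock: %d\n", exported.resv == imported.resv);
	pthread_mutex_destroy(&resv.lock);
	return 0;
}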
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 9bec91484c24..30dce235ddeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
@@ -357,11 +357,11 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | |||
357 | ring->adev = adev; | 357 | ring->adev = adev; |
358 | ring->idx = adev->num_rings++; | 358 | ring->idx = adev->num_rings++; |
359 | adev->rings[ring->idx] = ring; | 359 | adev->rings[ring->idx] = ring; |
360 | amdgpu_fence_driver_init_ring(ring); | 360 | r = amdgpu_fence_driver_init_ring(ring); |
361 | if (r) | ||
362 | return r; | ||
361 | } | 363 | } |
362 | 364 | ||
363 | init_waitqueue_head(&ring->fence_drv.fence_queue); | ||
364 | |||
365 | r = amdgpu_wb_get(adev, &ring->rptr_offs); | 365 | r = amdgpu_wb_get(adev, &ring->rptr_offs); |
366 | if (r) { | 366 | if (r) { |
367 | dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); | 367 | dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); |
@@ -407,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | |||
407 | if (ring->ring_obj == NULL) { | 407 | if (ring->ring_obj == NULL) { |
408 | r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, | 408 | r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, |
409 | AMDGPU_GEM_DOMAIN_GTT, 0, | 409 | AMDGPU_GEM_DOMAIN_GTT, 0, |
410 | NULL, &ring->ring_obj); | 410 | NULL, NULL, &ring->ring_obj); |
411 | if (r) { | 411 | if (r) { |
412 | dev_err(adev->dev, "(%d) ring create failed\n", r); | 412 | dev_err(adev->dev, "(%d) ring create failed\n", r); |
413 | return r; | 413 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 74dad270362c..e90712443fe9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
@@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, | |||
64 | INIT_LIST_HEAD(&sa_manager->flist[i]); | 64 | INIT_LIST_HEAD(&sa_manager->flist[i]); |
65 | } | 65 | } |
66 | 66 | ||
67 | r = amdgpu_bo_create(adev, size, align, true, | 67 | r = amdgpu_bo_create(adev, size, align, true, domain, |
68 | domain, 0, NULL, &sa_manager->bo); | 68 | 0, NULL, NULL, &sa_manager->bo); |
69 | if (r) { | 69 | if (r) { |
70 | dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); | 70 | dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); |
71 | return r; | 71 | return r; |
@@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f) | |||
145 | struct amd_sched_fence *s_fence; | 145 | struct amd_sched_fence *s_fence; |
146 | 146 | ||
147 | s_fence = to_amd_sched_fence(f); | 147 | s_fence = to_amd_sched_fence(f); |
148 | if (s_fence) | 148 | if (s_fence) { |
149 | return s_fence->scheduler->ring_id; | 149 | struct amdgpu_ring *ring; |
150 | |||
151 | ring = container_of(s_fence->sched, struct amdgpu_ring, sched); | ||
152 | return ring->idx; | ||
153 | } | ||
154 | |||
150 | a_fence = to_amdgpu_fence(f); | 155 | a_fence = to_amdgpu_fence(f); |
151 | if (a_fence) | 156 | if (a_fence) |
152 | return a_fence->ring->idx; | 157 | return a_fence->ring->idx; |
@@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, | |||
412 | } | 417 | } |
413 | 418 | ||
414 | #if defined(CONFIG_DEBUG_FS) | 419 | #if defined(CONFIG_DEBUG_FS) |
420 | |||
421 | static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m) | ||
422 | { | ||
423 | struct amdgpu_fence *a_fence = to_amdgpu_fence(fence); | ||
424 | struct amd_sched_fence *s_fence = to_amd_sched_fence(fence); | ||
425 | |||
426 | if (a_fence) | ||
427 | seq_printf(m, " protected by 0x%016llx on ring %d", | ||
428 | a_fence->seq, a_fence->ring->idx); | ||
429 | |||
430 | if (s_fence) { | ||
431 | struct amdgpu_ring *ring; | ||
432 | |||
433 | |||
434 | ring = container_of(s_fence->sched, struct amdgpu_ring, sched); | ||
435 | seq_printf(m, " protected by 0x%016x on ring %d", | ||
436 | s_fence->base.seqno, ring->idx); | ||
437 | } | ||
438 | } | ||
439 | |||
415 | void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, | 440 | void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, |
416 | struct seq_file *m) | 441 | struct seq_file *m) |
417 | { | 442 | { |
@@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, | |||
428 | } | 453 | } |
429 | seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", | 454 | seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", |
430 | soffset, eoffset, eoffset - soffset); | 455 | soffset, eoffset, eoffset - soffset); |
431 | if (i->fence) { | 456 | if (i->fence) |
432 | struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence); | 457 | amdgpu_sa_bo_dump_fence(i->fence, m); |
433 | struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence); | ||
434 | if (a_fence) | ||
435 | seq_printf(m, " protected by 0x%016llx on ring %d", | ||
436 | a_fence->seq, a_fence->ring->idx); | ||
437 | if (s_fence) | ||
438 | seq_printf(m, " protected by 0x%016x on ring %d", | ||
439 | s_fence->base.seqno, | ||
440 | s_fence->scheduler->ring_id); | ||
441 | |||
442 | } | ||
443 | seq_printf(m, "\n"); | 458 | seq_printf(m, "\n"); |
444 | } | 459 | } |
445 | spin_unlock(&sa_manager->wq.lock); | 460 | spin_unlock(&sa_manager->wq.lock); |
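Several hunks in this file (and in amdgpu_sync.c and amdgpu_sched.c below) replace s_fence->scheduler->ring_id with a container_of() walk from the embedded scheduler back to its amdgpu_ring. A standalone sketch of that container_of idiom with generic struct names, not the driver's types:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched {
	int job_count;
};

struct ring {
	int idx;
	struct sched sched;	/* scheduler embedded inside the ring */
};

int main(void)
{
	struct ring r = { .idx = 3, .sched = { 0 } };
	struct sched *s = &r.sched;	/* only the scheduler pointer is handed around */

	struct ring *owner = container_of(s, struct ring, sched);

	printf("fence belongs to ring %d\n", owner->idx);
	return 0;
}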
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index de98fbd2971e..2e946b2cad88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | |||
@@ -27,63 +27,48 @@ | |||
27 | #include <drm/drmP.h> | 27 | #include <drm/drmP.h> |
28 | #include "amdgpu.h" | 28 | #include "amdgpu.h" |
29 | 29 | ||
30 | static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job) | 30 | static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) |
31 | { | 31 | { |
32 | struct amdgpu_job *sched_job = (struct amdgpu_job *)job; | 32 | struct amdgpu_job *job = to_amdgpu_job(sched_job); |
33 | return amdgpu_sync_get_fence(&sched_job->ibs->sync); | 33 | return amdgpu_sync_get_fence(&job->ibs->sync); |
34 | } | 34 | } |
35 | 35 | ||
36 | static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job) | 36 | static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) |
37 | { | 37 | { |
38 | struct amdgpu_job *sched_job; | 38 | struct amdgpu_fence *fence = NULL; |
39 | struct amdgpu_fence *fence; | 39 | struct amdgpu_job *job; |
40 | int r; | 40 | int r; |
41 | 41 | ||
42 | if (!job) { | 42 | if (!sched_job) { |
43 | DRM_ERROR("job is null\n"); | 43 | DRM_ERROR("job is null\n"); |
44 | return NULL; | 44 | return NULL; |
45 | } | 45 | } |
46 | sched_job = (struct amdgpu_job *)job; | 46 | job = to_amdgpu_job(sched_job); |
47 | mutex_lock(&sched_job->job_lock); | 47 | mutex_lock(&job->job_lock); |
48 | r = amdgpu_ib_schedule(sched_job->adev, | 48 | r = amdgpu_ib_schedule(job->adev, |
49 | sched_job->num_ibs, | 49 | job->num_ibs, |
50 | sched_job->ibs, | 50 | job->ibs, |
51 | sched_job->base.owner); | 51 | job->base.owner); |
52 | if (r) | 52 | if (r) { |
53 | DRM_ERROR("Error scheduling IBs (%d)\n", r); | ||
53 | goto err; | 54 | goto err; |
54 | fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence); | 55 | } |
55 | |||
56 | if (sched_job->free_job) | ||
57 | sched_job->free_job(sched_job); | ||
58 | 56 | ||
59 | mutex_unlock(&sched_job->job_lock); | 57 | fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence); |
60 | return &fence->base; | ||
61 | 58 | ||
62 | err: | 59 | err: |
63 | DRM_ERROR("Run job error\n"); | 60 | if (job->free_job) |
64 | mutex_unlock(&sched_job->job_lock); | 61 | job->free_job(job); |
65 | job->sched->ops->process_job(job); | ||
66 | return NULL; | ||
67 | } | ||
68 | 62 | ||
69 | static void amdgpu_sched_process_job(struct amd_sched_job *job) | 63 | mutex_unlock(&job->job_lock); |
70 | { | 64 | fence_put(&job->base.s_fence->base); |
71 | struct amdgpu_job *sched_job; | 65 | kfree(job); |
72 | 66 | return fence ? &fence->base : NULL; | |
73 | if (!job) { | ||
74 | DRM_ERROR("job is null\n"); | ||
75 | return; | ||
76 | } | ||
77 | sched_job = (struct amdgpu_job *)job; | ||
78 | /* after processing job, free memory */ | ||
79 | fence_put(&sched_job->base.s_fence->base); | ||
80 | kfree(sched_job); | ||
81 | } | 67 | } |
82 | 68 | ||
83 | struct amd_sched_backend_ops amdgpu_sched_ops = { | 69 | struct amd_sched_backend_ops amdgpu_sched_ops = { |
84 | .dependency = amdgpu_sched_dependency, | 70 | .dependency = amdgpu_sched_dependency, |
85 | .run_job = amdgpu_sched_run_job, | 71 | .run_job = amdgpu_sched_run_job, |
86 | .process_job = amdgpu_sched_process_job | ||
87 | }; | 72 | }; |
88 | 73 | ||
89 | int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, | 74 | int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, |
@@ -100,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, | |||
100 | kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); | 85 | kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); |
101 | if (!job) | 86 | if (!job) |
102 | return -ENOMEM; | 87 | return -ENOMEM; |
103 | job->base.sched = ring->scheduler; | 88 | job->base.sched = &ring->sched; |
104 | job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; | 89 | job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; |
105 | job->adev = adev; | 90 | job->adev = adev; |
106 | job->ibs = ibs; | 91 | job->ibs = ibs; |
@@ -109,7 +94,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, | |||
109 | mutex_init(&job->job_lock); | 94 | mutex_init(&job->job_lock); |
110 | job->free_job = free_job; | 95 | job->free_job = free_job; |
111 | mutex_lock(&job->job_lock); | 96 | mutex_lock(&job->job_lock); |
112 | r = amd_sched_entity_push_job((struct amd_sched_job *)job); | 97 | r = amd_sched_entity_push_job(&job->base); |
113 | if (r) { | 98 | if (r) { |
114 | mutex_unlock(&job->job_lock); | 99 | mutex_unlock(&job->job_lock); |
115 | kfree(job); | 100 | kfree(job); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 068aeaff7183..4921de15b451 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | |||
@@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f) | |||
65 | 65 | ||
66 | if (a_fence) | 66 | if (a_fence) |
67 | return a_fence->ring->adev == adev; | 67 | return a_fence->ring->adev == adev; |
68 | if (s_fence) | 68 | |
69 | return (struct amdgpu_device *)s_fence->scheduler->priv == adev; | 69 | if (s_fence) { |
70 | struct amdgpu_ring *ring; | ||
71 | |||
72 | ring = container_of(s_fence->sched, struct amdgpu_ring, sched); | ||
73 | return ring->adev == adev; | ||
74 | } | ||
75 | |||
70 | return false; | 76 | return false; |
71 | } | 77 | } |
72 | 78 | ||
@@ -251,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync) | |||
251 | fence_put(e->fence); | 257 | fence_put(e->fence); |
252 | kfree(e); | 258 | kfree(e); |
253 | } | 259 | } |
260 | |||
261 | if (amdgpu_enable_semaphores) | ||
262 | return 0; | ||
263 | |||
264 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
265 | struct amdgpu_fence *fence = sync->sync_to[i]; | ||
266 | if (!fence) | ||
267 | continue; | ||
268 | |||
269 | r = fence_wait(&fence->base, false); | ||
270 | if (r) | ||
271 | return r; | ||
272 | } | ||
273 | |||
254 | return 0; | 274 | return 0; |
255 | } | 275 | } |
256 | 276 | ||
@@ -285,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, | |||
285 | return -EINVAL; | 305 | return -EINVAL; |
286 | } | 306 | } |
287 | 307 | ||
288 | if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) { | 308 | if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || |
309 | (count >= AMDGPU_NUM_SYNCS)) { | ||
289 | /* not enough room, wait manually */ | 310 | /* not enough room, wait manually */ |
290 | r = fence_wait(&fence->base, false); | 311 | r = fence_wait(&fence->base, false); |
291 | if (r) | 312 | if (r) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index f80b1a43be8a..4865615e9c06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | |||
@@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) | |||
59 | goto out_cleanup; | 59 | goto out_cleanup; |
60 | } | 60 | } |
61 | 61 | ||
62 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, | 62 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, |
63 | NULL, &vram_obj); | 63 | AMDGPU_GEM_DOMAIN_VRAM, 0, |
64 | NULL, NULL, &vram_obj); | ||
64 | if (r) { | 65 | if (r) { |
65 | DRM_ERROR("Failed to create VRAM object\n"); | 66 | DRM_ERROR("Failed to create VRAM object\n"); |
66 | goto out_cleanup; | 67 | goto out_cleanup; |
@@ -80,7 +81,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) | |||
80 | struct fence *fence = NULL; | 81 | struct fence *fence = NULL; |
81 | 82 | ||
82 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, | 83 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, |
83 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); | 84 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, |
85 | NULL, gtt_obj + i); | ||
84 | if (r) { | 86 | if (r) { |
85 | DRM_ERROR("Failed to create GTT object %d\n", i); | 87 | DRM_ERROR("Failed to create GTT object %d\n", i); |
86 | goto out_lclean; | 88 | goto out_lclean; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b5abd5cde413..364cbe975332 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -861,7 +861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) | |||
861 | r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, | 861 | r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, |
862 | AMDGPU_GEM_DOMAIN_VRAM, | 862 | AMDGPU_GEM_DOMAIN_VRAM, |
863 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 863 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
864 | NULL, &adev->stollen_vga_memory); | 864 | NULL, NULL, &adev->stollen_vga_memory); |
865 | if (r) { | 865 | if (r) { |
866 | return r; | 866 | return r; |
867 | } | 867 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 482e66797ae6..5cc95f1a7dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | |||
@@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | |||
247 | const struct common_firmware_header *header = NULL; | 247 | const struct common_firmware_header *header = NULL; |
248 | 248 | ||
249 | err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, | 249 | err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, |
250 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo); | 250 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo); |
251 | if (err) { | 251 | if (err) { |
252 | dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); | 252 | dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); |
253 | err = -ENOMEM; | 253 | err = -ENOMEM; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2cf6c6b06e3b..d0312364d950 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -156,7 +156,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) | |||
156 | r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, | 156 | r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, |
157 | AMDGPU_GEM_DOMAIN_VRAM, | 157 | AMDGPU_GEM_DOMAIN_VRAM, |
158 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 158 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
159 | NULL, &adev->uvd.vcpu_bo); | 159 | NULL, NULL, &adev->uvd.vcpu_bo); |
160 | if (r) { | 160 | if (r) { |
161 | dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); | 161 | dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); |
162 | return r; | 162 | return r; |
@@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, | |||
543 | return -EINVAL; | 543 | return -EINVAL; |
544 | } | 544 | } |
545 | 545 | ||
546 | if (msg_type == 1) { | 546 | switch (msg_type) { |
547 | case 0: | ||
548 | /* it's a create msg, calc image size (width * height) */ | ||
549 | amdgpu_bo_kunmap(bo); | ||
550 | |||
551 | /* try to alloc a new handle */ | ||
552 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { | ||
553 | if (atomic_read(&adev->uvd.handles[i]) == handle) { | ||
554 | DRM_ERROR("Handle 0x%x already in use!\n", handle); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | |||
558 | if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { | ||
559 | adev->uvd.filp[i] = ctx->parser->filp; | ||
560 | return 0; | ||
561 | } | ||
562 | } | ||
563 | |||
564 | DRM_ERROR("No more free UVD handles!\n"); | ||
565 | return -EINVAL; | ||
566 | |||
567 | case 1: | ||
547 | /* it's a decode msg, calc buffer sizes */ | 568 | /* it's a decode msg, calc buffer sizes */ |
548 | r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes); | 569 | r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes); |
549 | amdgpu_bo_kunmap(bo); | 570 | amdgpu_bo_kunmap(bo); |
550 | if (r) | 571 | if (r) |
551 | return r; | 572 | return r; |
552 | 573 | ||
553 | } else if (msg_type == 2) { | 574 | /* validate the handle */ |
575 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { | ||
576 | if (atomic_read(&adev->uvd.handles[i]) == handle) { | ||
577 | if (adev->uvd.filp[i] != ctx->parser->filp) { | ||
578 | DRM_ERROR("UVD handle collision detected!\n"); | ||
579 | return -EINVAL; | ||
580 | } | ||
581 | return 0; | ||
582 | } | ||
583 | } | ||
584 | |||
585 | DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); | ||
586 | return -ENOENT; | ||
587 | |||
588 | case 2: | ||
554 | /* it's a destroy msg, free the handle */ | 589 | /* it's a destroy msg, free the handle */ |
555 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) | 590 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) |
556 | atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); | 591 | atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); |
557 | amdgpu_bo_kunmap(bo); | 592 | amdgpu_bo_kunmap(bo); |
558 | return 0; | 593 | return 0; |
559 | } else { | ||
560 | /* it's a create msg */ | ||
561 | amdgpu_bo_kunmap(bo); | ||
562 | |||
563 | if (msg_type != 0) { | ||
564 | DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); | ||
565 | return -EINVAL; | ||
566 | } | ||
567 | |||
568 | /* it's a create msg, no special handling needed */ | ||
569 | } | ||
570 | |||
571 | /* create or decode, validate the handle */ | ||
572 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { | ||
573 | if (atomic_read(&adev->uvd.handles[i]) == handle) | ||
574 | return 0; | ||
575 | } | ||
576 | 594 | ||
577 | /* handle not found try to alloc a new one */ | 595 | default: |
578 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { | 596 | DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); |
579 | if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { | 597 | return -EINVAL; |
580 | adev->uvd.filp[i] = ctx->parser->filp; | ||
581 | return 0; | ||
582 | } | ||
583 | } | 598 | } |
584 | 599 | BUG(); | |
585 | DRM_ERROR("No more free UVD handles!\n"); | ||
586 | return -EINVAL; | 600 | return -EINVAL; |
587 | } | 601 | } |
588 | 602 | ||
@@ -805,10 +819,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) | |||
805 | } | 819 | } |
806 | 820 | ||
807 | static int amdgpu_uvd_free_job( | 821 | static int amdgpu_uvd_free_job( |
808 | struct amdgpu_job *sched_job) | 822 | struct amdgpu_job *job) |
809 | { | 823 | { |
810 | amdgpu_ib_free(sched_job->adev, sched_job->ibs); | 824 | amdgpu_ib_free(job->adev, job->ibs); |
811 | kfree(sched_job->ibs); | 825 | kfree(job->ibs); |
812 | return 0; | 826 | return 0; |
813 | } | 827 | } |
814 | 828 | ||
@@ -905,7 +919,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
905 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, | 919 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, |
906 | AMDGPU_GEM_DOMAIN_VRAM, | 920 | AMDGPU_GEM_DOMAIN_VRAM, |
907 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 921 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
908 | NULL, &bo); | 922 | NULL, NULL, &bo); |
909 | if (r) | 923 | if (r) |
910 | return r; | 924 | return r; |
911 | 925 | ||
@@ -954,7 +968,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
954 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, | 968 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, |
955 | AMDGPU_GEM_DOMAIN_VRAM, | 969 | AMDGPU_GEM_DOMAIN_VRAM, |
956 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 970 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
957 | NULL, &bo); | 971 | NULL, NULL, &bo); |
958 | if (r) | 972 | if (r) |
959 | return r; | 973 | return r; |
960 | 974 | ||
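The amdgpu_uvd_cs_msg() rewrite turns the if/else chain into a switch: create messages allocate a handle, decode messages must match an existing handle owned by the same file, and destroy messages release it. A simplified userspace sketch of that create/validate/destroy handle discipline, with hypothetical sizes and no claim to match the UVD code:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_HANDLES 4

static atomic_uint handles[MAX_HANDLES];	/* 0 means "slot free" */
static int owners[MAX_HANDLES];

/* create: fail if the handle already exists, otherwise claim a free slot */
static int handle_create(unsigned int handle, int client)
{
	unsigned int expected;
	int i;

	for (i = 0; i < MAX_HANDLES; i++)
		if (atomic_load(&handles[i]) == handle)
			return -1;	/* already in use */

	for (i = 0; i < MAX_HANDLES; i++) {
		expected = 0;
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle)) {
			owners[i] = client;
			return 0;
		}
	}
	return -1;	/* no free slot */
}

/* decode: the handle must exist and belong to the caller */
static int handle_validate(unsigned int handle, int client)
{
	int i;

	for (i = 0; i < MAX_HANDLES; i++)
		if (atomic_load(&handles[i]) == handle)
			return owners[i] == client ? 0 : -1;
	return -1;	/* unknown handle */
}

/* destroy: release whichever slot currently holds the handle */
static void handle_destroy(unsigned int handle)
{
	int i;

	for (i = 0; i < MAX_HANDLES; i++) {
		unsigned int expected = handle;

		atomic_compare_exchange_strong(&handles[i], &expected, 0);
	}
}

int main(void)
{
	int ok = handle_create(0x10, 1) == 0 &&
		 handle_validate(0x10, 1) == 0 &&
		 handle_validate(0x10, 2) != 0;	/* wrong owner is rejected */

	handle_destroy(0x10);
	printf("%s\n", ok ? "ok" : "broken");
	return ok ? 0 : 1;
}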
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 3cab96c42aa8..74f2038ac747 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
@@ -143,7 +143,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) | |||
143 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, | 143 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, |
144 | AMDGPU_GEM_DOMAIN_VRAM, | 144 | AMDGPU_GEM_DOMAIN_VRAM, |
145 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 145 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
146 | NULL, &adev->vce.vcpu_bo); | 146 | NULL, NULL, &adev->vce.vcpu_bo); |
147 | if (r) { | 147 | if (r) { |
148 | dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); | 148 | dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); |
149 | return r; | 149 | return r; |
@@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | |||
342 | } | 342 | } |
343 | 343 | ||
344 | static int amdgpu_vce_free_job( | 344 | static int amdgpu_vce_free_job( |
345 | struct amdgpu_job *sched_job) | 345 | struct amdgpu_job *job) |
346 | { | 346 | { |
347 | amdgpu_ib_free(sched_job->adev, sched_job->ibs); | 347 | amdgpu_ib_free(job->adev, job->ibs); |
348 | kfree(sched_job->ibs); | 348 | kfree(job->ibs); |
349 | return 0; | 349 | return 0; |
350 | } | 350 | } |
351 | 351 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f68b7cdc370a..53d551f2d839 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, | |||
316 | } | 316 | } |
317 | } | 317 | } |
318 | 318 | ||
319 | int amdgpu_vm_free_job(struct amdgpu_job *sched_job) | 319 | int amdgpu_vm_free_job(struct amdgpu_job *job) |
320 | { | 320 | { |
321 | int i; | 321 | int i; |
322 | for (i = 0; i < sched_job->num_ibs; i++) | 322 | for (i = 0; i < job->num_ibs; i++) |
323 | amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); | 323 | amdgpu_ib_free(job->adev, &job->ibs[i]); |
324 | kfree(sched_job->ibs); | 324 | kfree(job->ibs); |
325 | return 0; | 325 | return 0; |
326 | } | 326 | } |
327 | 327 | ||
@@ -455,8 +455,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |||
455 | return -ENOMEM; | 455 | return -ENOMEM; |
456 | 456 | ||
457 | r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); | 457 | r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); |
458 | if (r) | 458 | if (r) { |
459 | kfree(ib); | ||
459 | return r; | 460 | return r; |
461 | } | ||
460 | ib->length_dw = 0; | 462 | ib->length_dw = 0; |
461 | 463 | ||
462 | /* walk over the address space and update the page directory */ | 464 | /* walk over the address space and update the page directory */ |
@@ -686,31 +688,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, | |||
686 | } | 688 | } |
687 | 689 | ||
688 | /** | 690 | /** |
689 | * amdgpu_vm_fence_pts - fence page tables after an update | ||
690 | * | ||
691 | * @vm: requested vm | ||
692 | * @start: start of GPU address range | ||
693 | * @end: end of GPU address range | ||
694 | * @fence: fence to use | ||
695 | * | ||
696 | * Fence the page tables in the range @start - @end (cayman+). | ||
697 | * | ||
698 | * Global and local mutex must be locked! | ||
699 | */ | ||
700 | static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm, | ||
701 | uint64_t start, uint64_t end, | ||
702 | struct fence *fence) | ||
703 | { | ||
704 | unsigned i; | ||
705 | |||
706 | start >>= amdgpu_vm_block_size; | ||
707 | end >>= amdgpu_vm_block_size; | ||
708 | |||
709 | for (i = start; i <= end; ++i) | ||
710 | amdgpu_bo_fence(vm->page_tables[i].bo, fence, true); | ||
711 | } | ||
712 | |||
713 | /** | ||
714 | * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table | 691 | * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table |
715 | * | 692 | * |
716 | * @adev: amdgpu_device pointer | 693 | * @adev: amdgpu_device pointer |
@@ -813,8 +790,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |||
813 | if (r) | 790 | if (r) |
814 | goto error_free; | 791 | goto error_free; |
815 | 792 | ||
816 | amdgpu_vm_fence_pts(vm, mapping->it.start, | 793 | amdgpu_bo_fence(vm->page_directory, f, true); |
817 | mapping->it.last + 1, f); | ||
818 | if (fence) { | 794 | if (fence) { |
819 | fence_put(*fence); | 795 | fence_put(*fence); |
820 | *fence = fence_get(f); | 796 | *fence = fence_get(f); |
@@ -855,7 +831,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |||
855 | int r; | 831 | int r; |
856 | 832 | ||
857 | if (mem) { | 833 | if (mem) { |
858 | addr = mem->start << PAGE_SHIFT; | 834 | addr = (u64)mem->start << PAGE_SHIFT; |
859 | if (mem->mem_type != TTM_PL_TT) | 835 | if (mem->mem_type != TTM_PL_TT) |
860 | addr += adev->vm_manager.vram_base_offset; | 836 | addr += adev->vm_manager.vram_base_offset; |
861 | } else { | 837 | } else { |
@@ -1089,6 +1065,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1089 | 1065 | ||
1090 | /* walk over the address space and allocate the page tables */ | 1066 | /* walk over the address space and allocate the page tables */ |
1091 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | 1067 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { |
1068 | struct reservation_object *resv = vm->page_directory->tbo.resv; | ||
1092 | struct amdgpu_bo *pt; | 1069 | struct amdgpu_bo *pt; |
1093 | 1070 | ||
1094 | if (vm->page_tables[pt_idx].bo) | 1071 | if (vm->page_tables[pt_idx].bo) |
@@ -1097,11 +1074,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1097 | /* drop mutex to allocate and clear page table */ | 1074 | /* drop mutex to allocate and clear page table */ |
1098 | mutex_unlock(&vm->mutex); | 1075 | mutex_unlock(&vm->mutex); |
1099 | 1076 | ||
1077 | ww_mutex_lock(&resv->lock, NULL); | ||
1100 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, | 1078 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, |
1101 | AMDGPU_GPU_PAGE_SIZE, true, | 1079 | AMDGPU_GPU_PAGE_SIZE, true, |
1102 | AMDGPU_GEM_DOMAIN_VRAM, | 1080 | AMDGPU_GEM_DOMAIN_VRAM, |
1103 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, | 1081 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, |
1104 | NULL, &pt); | 1082 | NULL, resv, &pt); |
1083 | ww_mutex_unlock(&resv->lock); | ||
1105 | if (r) | 1084 | if (r) |
1106 | goto error_free; | 1085 | goto error_free; |
1107 | 1086 | ||
@@ -1303,7 +1282,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1303 | r = amdgpu_bo_create(adev, pd_size, align, true, | 1282 | r = amdgpu_bo_create(adev, pd_size, align, true, |
1304 | AMDGPU_GEM_DOMAIN_VRAM, | 1283 | AMDGPU_GEM_DOMAIN_VRAM, |
1305 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, | 1284 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, |
1306 | NULL, &vm->page_directory); | 1285 | NULL, NULL, &vm->page_directory); |
1307 | if (r) | 1286 | if (r) |
1308 | return r; | 1287 | return r; |
1309 | 1288 | ||
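amdgpu_vm_bo_update() now casts mem->start to u64 before shifting by PAGE_SHIFT, so the byte address is not truncated when the shift would otherwise be evaluated in a narrower type. A tiny sketch of why the cast has to happen before the shift, with illustrative values only:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* A page offset large enough that the byte address exceeds 32 bits. */
	unsigned int start = 0x200000;

	uint64_t bad  = start << PAGE_SHIFT;		/* shift done in 32 bits, wraps to 0 */
	uint64_t good = (uint64_t)start << PAGE_SHIFT;	/* widen first, keep all bits */

	printf("wrapped: %#llx, correct: %#llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}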
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index cd6edc40c9cd..1e0bba29e167 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | |||
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action) | |||
1279 | amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); | 1279 | amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); |
1280 | } | 1280 | } |
1281 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | 1281 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
1282 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, | 1282 | amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level); |
1283 | ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); | ||
1284 | if (ext_encoder) | 1283 | if (ext_encoder) |
1285 | amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); | 1284 | amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); |
1286 | } else { | 1285 | } else { |
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 82e8d0730517..a1a35a5df8e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
@@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle) | |||
6185 | if (!amdgpu_dpm) | 6185 | if (!amdgpu_dpm) |
6186 | return 0; | 6186 | return 0; |
6187 | 6187 | ||
6188 | /* init the sysfs and debugfs files late */ | ||
6189 | ret = amdgpu_pm_sysfs_init(adev); | ||
6190 | if (ret) | ||
6191 | return ret; | ||
6192 | |||
6188 | ret = ci_set_temperature_range(adev); | 6193 | ret = ci_set_temperature_range(adev); |
6189 | if (ret) | 6194 | if (ret) |
6190 | return ret; | 6195 | return ret; |
@@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle) | |||
6232 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; | 6237 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; |
6233 | if (amdgpu_dpm == 1) | 6238 | if (amdgpu_dpm == 1) |
6234 | amdgpu_pm_print_power_states(adev); | 6239 | amdgpu_pm_print_power_states(adev); |
6235 | ret = amdgpu_pm_sysfs_init(adev); | ||
6236 | if (ret) | ||
6237 | goto dpm_failed; | ||
6238 | mutex_unlock(&adev->pm.mutex); | 6240 | mutex_unlock(&adev->pm.mutex); |
6239 | DRM_INFO("amdgpu: dpm initialized\n"); | 6241 | DRM_INFO("amdgpu: dpm initialized\n"); |
6240 | 6242 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 4b6ce74753cd..484710cfdf82 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c | |||
@@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) | |||
1567 | int ret, i; | 1567 | int ret, i; |
1568 | u16 tmp16; | 1568 | u16 tmp16; |
1569 | 1569 | ||
1570 | if (pci_is_root_bus(adev->pdev->bus)) | ||
1571 | return; | ||
1572 | |||
1570 | if (amdgpu_pcie_gen2 == 0) | 1573 | if (amdgpu_pcie_gen2 == 0) |
1571 | return; | 1574 | return; |
1572 | 1575 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 44fa96ad4709..2e3373ed4c94 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c | |||
@@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle) | |||
596 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 596 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
597 | 597 | ||
598 | if (amdgpu_dpm) { | 598 | if (amdgpu_dpm) { |
599 | int ret; | ||
600 | /* init the sysfs and debugfs files late */ | ||
601 | ret = amdgpu_pm_sysfs_init(adev); | ||
602 | if (ret) | ||
603 | return ret; | ||
604 | |||
599 | /* powerdown unused blocks for now */ | 605 | /* powerdown unused blocks for now */ |
600 | cz_dpm_powergate_uvd(adev, true); | 606 | cz_dpm_powergate_uvd(adev, true); |
601 | cz_dpm_powergate_vce(adev, true); | 607 | cz_dpm_powergate_vce(adev, true); |
@@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle) | |||
632 | if (amdgpu_dpm == 1) | 638 | if (amdgpu_dpm == 1) |
633 | amdgpu_pm_print_power_states(adev); | 639 | amdgpu_pm_print_power_states(adev); |
634 | 640 | ||
635 | ret = amdgpu_pm_sysfs_init(adev); | ||
636 | if (ret) | ||
637 | goto dpm_init_failed; | ||
638 | |||
639 | mutex_unlock(&adev->pm.mutex); | 641 | mutex_unlock(&adev->pm.mutex); |
640 | DRM_INFO("amdgpu: dpm initialized\n"); | 642 | DRM_INFO("amdgpu: dpm initialized\n"); |
641 | 643 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c index a72ffc7d6c26..e33180d3314a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c | |||
@@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev) | |||
814 | * 3. map kernel virtual address | 814 | * 3. map kernel virtual address |
815 | */ | 815 | */ |
816 | ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, | 816 | ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, |
817 | true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf); | 817 | true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, |
818 | toc_buf); | ||
818 | 819 | ||
819 | if (ret) { | 820 | if (ret) { |
820 | dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); | 821 | dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); |
@@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev) | |||
822 | } | 823 | } |
823 | 824 | ||
824 | ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, | 825 | ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, |
825 | true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf); | 826 | true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, |
827 | smu_buf); | ||
826 | 828 | ||
827 | if (ret) { | 829 | if (ret) { |
828 | dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); | 830 | dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index e4d101b1252a..d4c82b625727 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) | |||
255 | return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); | 255 | return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); |
256 | } | 256 | } |
257 | 257 | ||
258 | static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev) | ||
259 | { | ||
260 | unsigned i; | ||
261 | |||
262 | /* Enable pflip interrupts */ | ||
263 | for (i = 0; i < adev->mode_info.num_crtc; i++) | ||
264 | amdgpu_irq_get(adev, &adev->pageflip_irq, i); | ||
265 | } | ||
266 | |||
267 | static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev) | ||
268 | { | ||
269 | unsigned i; | ||
270 | |||
271 | /* Disable pflip interrupts */ | ||
272 | for (i = 0; i < adev->mode_info.num_crtc; i++) | ||
273 | amdgpu_irq_put(adev, &adev->pageflip_irq, i); | ||
274 | } | ||
275 | |||
258 | /** | 276 | /** |
259 | * dce_v10_0_page_flip - pageflip callback. | 277 | * dce_v10_0_page_flip - pageflip callback. |
260 | * | 278 | * |
@@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2663 | dce_v10_0_vga_enable(crtc, true); | 2681 | dce_v10_0_vga_enable(crtc, true); |
2664 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); | 2682 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); |
2665 | dce_v10_0_vga_enable(crtc, false); | 2683 | dce_v10_0_vga_enable(crtc, false); |
2666 | /* Make sure VBLANK interrupt is still enabled */ | 2684 | /* Make sure VBLANK and PFLIP interrupts are still enabled */ |
2667 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); | 2685 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); |
2668 | amdgpu_irq_update(adev, &adev->crtc_irq, type); | 2686 | amdgpu_irq_update(adev, &adev->crtc_irq, type); |
2687 | amdgpu_irq_update(adev, &adev->pageflip_irq, type); | ||
2669 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); | 2688 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); |
2670 | dce_v10_0_crtc_load_lut(crtc); | 2689 | dce_v10_0_crtc_load_lut(crtc); |
2671 | break; | 2690 | break; |
@@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle) | |||
3025 | dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | 3044 | dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); |
3026 | } | 3045 | } |
3027 | 3046 | ||
3047 | dce_v10_0_pageflip_interrupt_init(adev); | ||
3048 | |||
3028 | return 0; | 3049 | return 0; |
3029 | } | 3050 | } |
3030 | 3051 | ||
@@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle) | |||
3039 | dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | 3060 | dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); |
3040 | } | 3061 | } |
3041 | 3062 | ||
3063 | dce_v10_0_pageflip_interrupt_fini(adev); | ||
3064 | |||
3042 | return 0; | 3065 | return 0; |
3043 | } | 3066 | } |
3044 | 3067 | ||
@@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle) | |||
3050 | 3073 | ||
3051 | dce_v10_0_hpd_fini(adev); | 3074 | dce_v10_0_hpd_fini(adev); |
3052 | 3075 | ||
3076 | dce_v10_0_pageflip_interrupt_fini(adev); | ||
3077 | |||
3053 | return 0; | 3078 | return 0; |
3054 | } | 3079 | } |
3055 | 3080 | ||
@@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle) | |||
3075 | /* initialize hpd */ | 3100 | /* initialize hpd */ |
3076 | dce_v10_0_hpd_init(adev); | 3101 | dce_v10_0_hpd_init(adev); |
3077 | 3102 | ||
3103 | dce_v10_0_pageflip_interrupt_init(adev); | ||
3104 | |||
3078 | return 0; | 3105 | return 0; |
3079 | } | 3106 | } |
3080 | 3107 | ||
@@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, | |||
3369 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); | 3396 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); |
3370 | 3397 | ||
3371 | drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); | 3398 | drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); |
3372 | amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); | ||
3373 | queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); | 3399 | queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); |
3374 | 3400 | ||
3375 | return 0; | 3401 | return 0; |
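dce_v10_0 (and the v11/v8 counterparts below) now take a pageflip interrupt reference per CRTC at hw_init/resume and drop it at hw_fini/suspend, rather than toggling the interrupt around each individual flip. A minimal userspace sketch of that refcounted enable/disable pattern; the struct and function names are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Refcounted interrupt source: the line stays enabled while anyone holds it. */
struct irq_src {
	int  refcount;
	bool enabled;
};

static void irq_get(struct irq_src *src)
{
	if (src->refcount++ == 0) {
		src->enabled = true;
		printf("interrupt enabled\n");
	}
}

static void irq_put(struct irq_src *src)
{
	if (--src->refcount == 0) {
		src->enabled = false;
		printf("interrupt disabled\n");
	}
}

int main(void)
{
	struct irq_src pflip = { 0, false };
	int crtc, num_crtc = 3;

	/* hw_init: take one reference per CRTC so flips never race the enable. */
	for (crtc = 0; crtc < num_crtc; crtc++)
		irq_get(&pflip);

	/* hw_fini: drop them again; the line is disabled with the last put. */
	for (crtc = 0; crtc < num_crtc; crtc++)
		irq_put(&pflip);
	return 0;
}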
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 6411e8244671..7e1cf5e4eebf 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) | |||
233 | return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); | 233 | return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); |
234 | } | 234 | } |
235 | 235 | ||
236 | static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev) | ||
237 | { | ||
238 | unsigned i; | ||
239 | |||
240 | /* Enable pflip interrupts */ | ||
241 | for (i = 0; i < adev->mode_info.num_crtc; i++) | ||
242 | amdgpu_irq_get(adev, &adev->pageflip_irq, i); | ||
243 | } | ||
244 | |||
245 | static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) | ||
246 | { | ||
247 | unsigned i; | ||
248 | |||
249 | /* Disable pflip interrupts */ | ||
250 | for (i = 0; i < adev->mode_info.num_crtc; i++) | ||
251 | amdgpu_irq_put(adev, &adev->pageflip_irq, i); | ||
252 | } | ||
253 | |||
236 | /** | 254 | /** |
237 | * dce_v11_0_page_flip - pageflip callback. | 255 | * dce_v11_0_page_flip - pageflip callback. |
238 | * | 256 | * |
@@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2640 | dce_v11_0_vga_enable(crtc, true); | 2658 | dce_v11_0_vga_enable(crtc, true); |
2641 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); | 2659 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); |
2642 | dce_v11_0_vga_enable(crtc, false); | 2660 | dce_v11_0_vga_enable(crtc, false); |
2643 | /* Make sure VBLANK interrupt is still enabled */ | 2661 | /* Make sure VBLANK and PFLIP interrupts are still enabled */ |
2644 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); | 2662 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); |
2645 | amdgpu_irq_update(adev, &adev->crtc_irq, type); | 2663 | amdgpu_irq_update(adev, &adev->crtc_irq, type); |
2664 | amdgpu_irq_update(adev, &adev->pageflip_irq, type); | ||
2646 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); | 2665 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); |
2647 | dce_v11_0_crtc_load_lut(crtc); | 2666 | dce_v11_0_crtc_load_lut(crtc); |
2648 | break; | 2667 | break; |
@@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle) | |||
2888 | 2907 | ||
2889 | switch (adev->asic_type) { | 2908 | switch (adev->asic_type) { |
2890 | case CHIP_CARRIZO: | 2909 | case CHIP_CARRIZO: |
2891 | adev->mode_info.num_crtc = 4; | 2910 | adev->mode_info.num_crtc = 3; |
2892 | adev->mode_info.num_hpd = 6; | 2911 | adev->mode_info.num_hpd = 6; |
2893 | adev->mode_info.num_dig = 9; | 2912 | adev->mode_info.num_dig = 9; |
2894 | break; | 2913 | break; |
@@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle) | |||
3000 | dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | 3019 | dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); |
3001 | } | 3020 | } |
3002 | 3021 | ||
3022 | dce_v11_0_pageflip_interrupt_init(adev); | ||
3023 | |||
3003 | return 0; | 3024 | return 0; |
3004 | } | 3025 | } |
3005 | 3026 | ||
@@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle) | |||
3014 | dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | 3035 | dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); |
3015 | } | 3036 | } |
3016 | 3037 | ||
3038 | dce_v11_0_pageflip_interrupt_fini(adev); | ||
3039 | |||
3017 | return 0; | 3040 | return 0; |
3018 | } | 3041 | } |
3019 | 3042 | ||
@@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle) | |||
3025 | 3048 | ||
3026 | dce_v11_0_hpd_fini(adev); | 3049 | dce_v11_0_hpd_fini(adev); |
3027 | 3050 | ||
3051 | dce_v11_0_pageflip_interrupt_fini(adev); | ||
3052 | |||
3028 | return 0; | 3053 | return 0; |
3029 | } | 3054 | } |
3030 | 3055 | ||
@@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle) | |||
3051 | /* initialize hpd */ | 3076 | /* initialize hpd */ |
3052 | dce_v11_0_hpd_init(adev); | 3077 | dce_v11_0_hpd_init(adev); |
3053 | 3078 | ||
3079 | dce_v11_0_pageflip_interrupt_init(adev); | ||
3080 | |||
3054 | return 0; | 3081 | return 0; |
3055 | } | 3082 | } |
3056 | 3083 | ||
@@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, | |||
3345 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); | 3372 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); |
3346 | 3373 | ||
3347 | drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); | 3374 | drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); |
3348 | amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); | ||
3349 | queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); | 3375 | queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); |
3350 | 3376 | ||
3351 | return 0; | 3377 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index c86911c2ea2a..34b9c2a9d8d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
@@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) | |||
204 | return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); | 204 | return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); |
205 | } | 205 | } |
206 | 206 | ||
207 | static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev) | ||
208 | { | ||
209 | unsigned i; | ||
210 | |||
211 | /* Enable pflip interrupts */ | ||
212 | for (i = 0; i < adev->mode_info.num_crtc; i++) | ||
213 | amdgpu_irq_get(adev, &adev->pageflip_irq, i); | ||
214 | } | ||
215 | |||
216 | static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev) | ||
217 | { | ||
218 | unsigned i; | ||
219 | |||
220 | /* Disable pflip interrupts */ | ||
221 | for (i = 0; i < adev->mode_info.num_crtc; i++) | ||
222 | amdgpu_irq_put(adev, &adev->pageflip_irq, i); | ||
223 | } | ||
224 | |||
207 | /** | 225 | /** |
208 | * dce_v8_0_page_flip - pageflip callback. | 226 | * dce_v8_0_page_flip - pageflip callback. |
209 | * | 227 | * |
@@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2575 | dce_v8_0_vga_enable(crtc, true); | 2593 | dce_v8_0_vga_enable(crtc, true); |
2576 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); | 2594 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); |
2577 | dce_v8_0_vga_enable(crtc, false); | 2595 | dce_v8_0_vga_enable(crtc, false); |
2578 | /* Make sure VBLANK interrupt is still enabled */ | 2596 | /* Make sure VBLANK and PFLIP interrupts are still enabled */ |
2579 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); | 2597 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); |
2580 | amdgpu_irq_update(adev, &adev->crtc_irq, type); | 2598 | amdgpu_irq_update(adev, &adev->crtc_irq, type); |
2599 | amdgpu_irq_update(adev, &adev->pageflip_irq, type); | ||
2581 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); | 2600 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); |
2582 | dce_v8_0_crtc_load_lut(crtc); | 2601 | dce_v8_0_crtc_load_lut(crtc); |
2583 | break; | 2602 | break; |
@@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle) | |||
2933 | dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | 2952 | dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); |
2934 | } | 2953 | } |
2935 | 2954 | ||
2955 | dce_v8_0_pageflip_interrupt_init(adev); | ||
2956 | |||
2936 | return 0; | 2957 | return 0; |
2937 | } | 2958 | } |
2938 | 2959 | ||
@@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle) | |||
2947 | dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | 2968 | dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); |
2948 | } | 2969 | } |
2949 | 2970 | ||
2971 | dce_v8_0_pageflip_interrupt_fini(adev); | ||
2972 | |||
2950 | return 0; | 2973 | return 0; |
2951 | } | 2974 | } |
2952 | 2975 | ||
@@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle) | |||
2958 | 2981 | ||
2959 | dce_v8_0_hpd_fini(adev); | 2982 | dce_v8_0_hpd_fini(adev); |
2960 | 2983 | ||
2984 | dce_v8_0_pageflip_interrupt_fini(adev); | ||
2985 | |||
2961 | return 0; | 2986 | return 0; |
2962 | } | 2987 | } |
2963 | 2988 | ||
@@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle) | |||
2981 | /* initialize hpd */ | 3006 | /* initialize hpd */ |
2982 | dce_v8_0_hpd_init(adev); | 3007 | dce_v8_0_hpd_init(adev); |
2983 | 3008 | ||
3009 | dce_v8_0_pageflip_interrupt_init(adev); | ||
3010 | |||
2984 | return 0; | 3011 | return 0; |
2985 | } | 3012 | } |
2986 | 3013 | ||
@@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, | |||
3376 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); | 3403 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); |
3377 | 3404 | ||
3378 | drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); | 3405 | drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); |
3379 | amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); | ||
3380 | queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); | 3406 | queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); |
3381 | 3407 | ||
3382 | return 0; | 3408 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index 322edea65857..bda1249eb871 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c | |||
@@ -764,7 +764,7 @@ int fiji_smu_init(struct amdgpu_device *adev) | |||
764 | ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, | 764 | ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, |
765 | true, AMDGPU_GEM_DOMAIN_VRAM, | 765 | true, AMDGPU_GEM_DOMAIN_VRAM, |
766 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 766 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
767 | NULL, toc_buf); | 767 | NULL, NULL, toc_buf); |
768 | if (ret) { | 768 | if (ret) { |
769 | DRM_ERROR("Failed to allocate memory for TOC buffer\n"); | 769 | DRM_ERROR("Failed to allocate memory for TOC buffer\n"); |
770 | return -ENOMEM; | 770 | return -ENOMEM; |
@@ -774,7 +774,7 @@ int fiji_smu_init(struct amdgpu_device *adev) | |||
774 | ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, | 774 | ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, |
775 | true, AMDGPU_GEM_DOMAIN_VRAM, | 775 | true, AMDGPU_GEM_DOMAIN_VRAM, |
776 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 776 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
777 | NULL, smu_buf); | 777 | NULL, NULL, smu_buf); |
778 | if (ret) { | 778 | if (ret) { |
779 | DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); | 779 | DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); |
780 | return -ENOMEM; | 780 | return -ENOMEM; |
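Editorial note: every amdgpu_bo_create() caller in this series gains one extra NULL argument just before the returned BO pointer. A hedged reading, based only on the call sites shown here, is that the allocator now takes an optional reservation object to share with another buffer and that NULL keeps the old behaviour; the prototype below is an assumption for illustration, not taken from this diff:

/* Assumed shape of the updated allocator (sketch only); all callers in
 * this series pass NULL for the new 'resv' argument. */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,	/* new parameter (assumed) */
		     struct amdgpu_bo **bo_ptr);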
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 4bd1e5cf65ca..e992bf2ff66c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -3206,7 +3206,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) | |||
3206 | r = amdgpu_bo_create(adev, | 3206 | r = amdgpu_bo_create(adev, |
3207 | adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, | 3207 | adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, |
3208 | PAGE_SIZE, true, | 3208 | PAGE_SIZE, true, |
3209 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, | 3209 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, |
3210 | &adev->gfx.mec.hpd_eop_obj); | 3210 | &adev->gfx.mec.hpd_eop_obj); |
3211 | if (r) { | 3211 | if (r) { |
3212 | dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); | 3212 | dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); |
@@ -3373,7 +3373,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) | |||
3373 | r = amdgpu_bo_create(adev, | 3373 | r = amdgpu_bo_create(adev, |
3374 | sizeof(struct bonaire_mqd), | 3374 | sizeof(struct bonaire_mqd), |
3375 | PAGE_SIZE, true, | 3375 | PAGE_SIZE, true, |
3376 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, | 3376 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, |
3377 | &ring->mqd_obj); | 3377 | &ring->mqd_obj); |
3378 | if (r) { | 3378 | if (r) { |
3379 | dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); | 3379 | dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); |
@@ -3610,41 +3610,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) | |||
3610 | return 0; | 3610 | return 0; |
3611 | } | 3611 | } |
3612 | 3612 | ||
3613 | static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring) | ||
3614 | { | ||
3615 | struct amdgpu_device *adev = ring->adev; | ||
3616 | u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; | ||
3617 | |||
3618 | /* instruct DE to set a magic number */ | ||
3619 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3620 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
3621 | WRITE_DATA_DST_SEL(5))); | ||
3622 | amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); | ||
3623 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); | ||
3624 | amdgpu_ring_write(ring, 1); | ||
3625 | |||
3626 | /* let CE wait till condition satisfied */ | ||
3627 | amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); | ||
3628 | amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ | ||
3629 | WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ | ||
3630 | WAIT_REG_MEM_FUNCTION(3) | /* == */ | ||
3631 | WAIT_REG_MEM_ENGINE(2))); /* ce */ | ||
3632 | amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); | ||
3633 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); | ||
3634 | amdgpu_ring_write(ring, 1); | ||
3635 | amdgpu_ring_write(ring, 0xffffffff); | ||
3636 | amdgpu_ring_write(ring, 4); /* poll interval */ | ||
3637 | |||
3638 | /* instruct CE to reset wb of ce_sync to zero */ | ||
3639 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3640 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | | ||
3641 | WRITE_DATA_DST_SEL(5) | | ||
3642 | WR_CONFIRM)); | ||
3643 | amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); | ||
3644 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); | ||
3645 | amdgpu_ring_write(ring, 0); | ||
3646 | } | ||
3647 | |||
3648 | /* | 3613 | /* |
3649 | * vm | 3614 | * vm |
3650 | * VMID 0 is the physical GPU addresses as used by the kernel. | 3615 | * VMID 0 is the physical GPU addresses as used by the kernel. |
@@ -3663,6 +3628,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
3663 | unsigned vm_id, uint64_t pd_addr) | 3628 | unsigned vm_id, uint64_t pd_addr) |
3664 | { | 3629 | { |
3665 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 3630 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); |
3631 | if (usepfp) { | ||
3632 | /* sync CE with ME to prevent CE fetch CEIB before context switch done */ | ||
3633 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); | ||
3634 | amdgpu_ring_write(ring, 0); | ||
3635 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); | ||
3636 | amdgpu_ring_write(ring, 0); | ||
3637 | } | ||
3666 | 3638 | ||
3667 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 3639 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
3668 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | | 3640 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
@@ -3703,7 +3675,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
3703 | amdgpu_ring_write(ring, 0x0); | 3675 | amdgpu_ring_write(ring, 0x0); |
3704 | 3676 | ||
3705 | /* sync CE with ME to prevent CE fetch CEIB before context switch done */ | 3677 | /* sync CE with ME to prevent CE fetch CEIB before context switch done */ |
3706 | gfx_v7_0_ce_sync_me(ring); | 3678 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); |
3679 | amdgpu_ring_write(ring, 0); | ||
3680 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); | ||
3681 | amdgpu_ring_write(ring, 0); | ||
3707 | } | 3682 | } |
3708 | } | 3683 | } |
3709 | 3684 | ||
@@ -3788,7 +3763,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3788 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 3763 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, |
3789 | AMDGPU_GEM_DOMAIN_VRAM, | 3764 | AMDGPU_GEM_DOMAIN_VRAM, |
3790 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 3765 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
3791 | NULL, &adev->gfx.rlc.save_restore_obj); | 3766 | NULL, NULL, |
3767 | &adev->gfx.rlc.save_restore_obj); | ||
3792 | if (r) { | 3768 | if (r) { |
3793 | dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); | 3769 | dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); |
3794 | return r; | 3770 | return r; |
@@ -3831,7 +3807,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3831 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 3807 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, |
3832 | AMDGPU_GEM_DOMAIN_VRAM, | 3808 | AMDGPU_GEM_DOMAIN_VRAM, |
3833 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 3809 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
3834 | NULL, &adev->gfx.rlc.clear_state_obj); | 3810 | NULL, NULL, |
3811 | &adev->gfx.rlc.clear_state_obj); | ||
3835 | if (r) { | 3812 | if (r) { |
3836 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); | 3813 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); |
3837 | gfx_v7_0_rlc_fini(adev); | 3814 | gfx_v7_0_rlc_fini(adev); |
@@ -3870,7 +3847,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3870 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, | 3847 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, |
3871 | AMDGPU_GEM_DOMAIN_VRAM, | 3848 | AMDGPU_GEM_DOMAIN_VRAM, |
3872 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 3849 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
3873 | NULL, &adev->gfx.rlc.cp_table_obj); | 3850 | NULL, NULL, |
3851 | &adev->gfx.rlc.cp_table_obj); | ||
3874 | if (r) { | 3852 | if (r) { |
3875 | dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); | 3853 | dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); |
3876 | gfx_v7_0_rlc_fini(adev); | 3854 | gfx_v7_0_rlc_fini(adev); |
@@ -4802,12 +4780,6 @@ static int gfx_v7_0_sw_init(void *handle) | |||
4802 | return r; | 4780 | return r; |
4803 | } | 4781 | } |
4804 | 4782 | ||
4805 | r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs); | ||
4806 | if (r) { | ||
4807 | DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); | ||
4808 | return r; | ||
4809 | } | ||
4810 | |||
4811 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) { | 4783 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) { |
4812 | ring = &adev->gfx.gfx_ring[i]; | 4784 | ring = &adev->gfx.gfx_ring[i]; |
4813 | ring->ring_obj = NULL; | 4785 | ring->ring_obj = NULL; |
@@ -4851,21 +4823,21 @@ static int gfx_v7_0_sw_init(void *handle) | |||
4851 | r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, | 4823 | r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, |
4852 | PAGE_SIZE, true, | 4824 | PAGE_SIZE, true, |
4853 | AMDGPU_GEM_DOMAIN_GDS, 0, | 4825 | AMDGPU_GEM_DOMAIN_GDS, 0, |
4854 | NULL, &adev->gds.gds_gfx_bo); | 4826 | NULL, NULL, &adev->gds.gds_gfx_bo); |
4855 | if (r) | 4827 | if (r) |
4856 | return r; | 4828 | return r; |
4857 | 4829 | ||
4858 | r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, | 4830 | r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, |
4859 | PAGE_SIZE, true, | 4831 | PAGE_SIZE, true, |
4860 | AMDGPU_GEM_DOMAIN_GWS, 0, | 4832 | AMDGPU_GEM_DOMAIN_GWS, 0, |
4861 | NULL, &adev->gds.gws_gfx_bo); | 4833 | NULL, NULL, &adev->gds.gws_gfx_bo); |
4862 | if (r) | 4834 | if (r) |
4863 | return r; | 4835 | return r; |
4864 | 4836 | ||
4865 | r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, | 4837 | r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, |
4866 | PAGE_SIZE, true, | 4838 | PAGE_SIZE, true, |
4867 | AMDGPU_GEM_DOMAIN_OA, 0, | 4839 | AMDGPU_GEM_DOMAIN_OA, 0, |
4868 | NULL, &adev->gds.oa_gfx_bo); | 4840 | NULL, NULL, &adev->gds.oa_gfx_bo); |
4869 | if (r) | 4841 | if (r) |
4870 | return r; | 4842 | return r; |
4871 | 4843 | ||
@@ -4886,8 +4858,6 @@ static int gfx_v7_0_sw_fini(void *handle) | |||
4886 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | 4858 | for (i = 0; i < adev->gfx.num_compute_rings; i++) |
4887 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); | 4859 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); |
4888 | 4860 | ||
4889 | amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); | ||
4890 | |||
4891 | gfx_v7_0_cp_compute_fini(adev); | 4861 | gfx_v7_0_cp_compute_fini(adev); |
4892 | gfx_v7_0_rlc_fini(adev); | 4862 | gfx_v7_0_rlc_fini(adev); |
4893 | gfx_v7_0_mec_fini(adev); | 4863 | gfx_v7_0_mec_fini(adev); |
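Editorial note: the gfx_v7_0 change drops the gfx_v7_0_ce_sync_me() handshake, which bounced a magic value through a dedicated writeback slot (gfx.ce_sync_offs, also removed from sw_init/sw_fini), and instead synchronizes the constant engine with the micro engine by emitting SWITCH_BUFFER packets around the VM flush. A minimal sketch of the replacement sequence, using only the ring calls visible in the hunk; the helper name is illustrative, the patch open-codes these writes inside gfx_v7_0_ring_emit_vm_flush():

/* Keep the CE from fetching the constant IB before the context switch
 * completes by emitting a double SWITCH_BUFFER on the gfx ring. */
static void gfx_v7_0_emit_switch_buffers(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}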
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 53f07439a512..cb4f68f53f24 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -868,7 +868,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) | |||
868 | r = amdgpu_bo_create(adev, | 868 | r = amdgpu_bo_create(adev, |
869 | adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, | 869 | adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, |
870 | PAGE_SIZE, true, | 870 | PAGE_SIZE, true, |
871 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, | 871 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, |
872 | &adev->gfx.mec.hpd_eop_obj); | 872 | &adev->gfx.mec.hpd_eop_obj); |
873 | if (r) { | 873 | if (r) { |
874 | dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); | 874 | dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); |
@@ -940,12 +940,6 @@ static int gfx_v8_0_sw_init(void *handle) | |||
940 | return r; | 940 | return r; |
941 | } | 941 | } |
942 | 942 | ||
943 | r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs); | ||
944 | if (r) { | ||
945 | DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); | ||
946 | return r; | ||
947 | } | ||
948 | |||
949 | /* set up the gfx ring */ | 943 | /* set up the gfx ring */ |
950 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) { | 944 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) { |
951 | ring = &adev->gfx.gfx_ring[i]; | 945 | ring = &adev->gfx.gfx_ring[i]; |
@@ -995,21 +989,21 @@ static int gfx_v8_0_sw_init(void *handle) | |||
995 | /* reserve GDS, GWS and OA resource for gfx */ | 989 | /* reserve GDS, GWS and OA resource for gfx */ |
996 | r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, | 990 | r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, |
997 | PAGE_SIZE, true, | 991 | PAGE_SIZE, true, |
998 | AMDGPU_GEM_DOMAIN_GDS, 0, | 992 | AMDGPU_GEM_DOMAIN_GDS, 0, NULL, |
999 | NULL, &adev->gds.gds_gfx_bo); | 993 | NULL, &adev->gds.gds_gfx_bo); |
1000 | if (r) | 994 | if (r) |
1001 | return r; | 995 | return r; |
1002 | 996 | ||
1003 | r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, | 997 | r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, |
1004 | PAGE_SIZE, true, | 998 | PAGE_SIZE, true, |
1005 | AMDGPU_GEM_DOMAIN_GWS, 0, | 999 | AMDGPU_GEM_DOMAIN_GWS, 0, NULL, |
1006 | NULL, &adev->gds.gws_gfx_bo); | 1000 | NULL, &adev->gds.gws_gfx_bo); |
1007 | if (r) | 1001 | if (r) |
1008 | return r; | 1002 | return r; |
1009 | 1003 | ||
1010 | r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, | 1004 | r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, |
1011 | PAGE_SIZE, true, | 1005 | PAGE_SIZE, true, |
1012 | AMDGPU_GEM_DOMAIN_OA, 0, | 1006 | AMDGPU_GEM_DOMAIN_OA, 0, NULL, |
1013 | NULL, &adev->gds.oa_gfx_bo); | 1007 | NULL, &adev->gds.oa_gfx_bo); |
1014 | if (r) | 1008 | if (r) |
1015 | return r; | 1009 | return r; |
@@ -1033,8 +1027,6 @@ static int gfx_v8_0_sw_fini(void *handle) | |||
1033 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | 1027 | for (i = 0; i < adev->gfx.num_compute_rings; i++) |
1034 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); | 1028 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); |
1035 | 1029 | ||
1036 | amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); | ||
1037 | |||
1038 | gfx_v8_0_mec_fini(adev); | 1030 | gfx_v8_0_mec_fini(adev); |
1039 | 1031 | ||
1040 | return 0; | 1032 | return 0; |
@@ -3106,7 +3098,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) | |||
3106 | sizeof(struct vi_mqd), | 3098 | sizeof(struct vi_mqd), |
3107 | PAGE_SIZE, true, | 3099 | PAGE_SIZE, true, |
3108 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, | 3100 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, |
3109 | &ring->mqd_obj); | 3101 | NULL, &ring->mqd_obj); |
3110 | if (r) { | 3102 | if (r) { |
3111 | dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); | 3103 | dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); |
3112 | return r; | 3104 | return r; |
@@ -3965,6 +3957,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, | |||
3965 | DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); | 3957 | DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); |
3966 | amdgpu_ring_write(ring, lower_32_bits(seq)); | 3958 | amdgpu_ring_write(ring, lower_32_bits(seq)); |
3967 | amdgpu_ring_write(ring, upper_32_bits(seq)); | 3959 | amdgpu_ring_write(ring, upper_32_bits(seq)); |
3960 | |||
3968 | } | 3961 | } |
3969 | 3962 | ||
3970 | /** | 3963 | /** |
@@ -4005,49 +3998,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, | |||
4005 | return true; | 3998 | return true; |
4006 | } | 3999 | } |
4007 | 4000 | ||
4008 | static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring) | 4001 | static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, |
4002 | unsigned vm_id, uint64_t pd_addr) | ||
4009 | { | 4003 | { |
4010 | struct amdgpu_device *adev = ring->adev; | 4004 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); |
4011 | u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; | 4005 | uint32_t seq = ring->fence_drv.sync_seq[ring->idx]; |
4012 | 4006 | uint64_t addr = ring->fence_drv.gpu_addr; | |
4013 | /* instruct DE to set a magic number */ | ||
4014 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
4015 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
4016 | WRITE_DATA_DST_SEL(5))); | ||
4017 | amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); | ||
4018 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); | ||
4019 | amdgpu_ring_write(ring, 1); | ||
4020 | 4007 | ||
4021 | /* let CE wait till condition satisfied */ | ||
4022 | amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); | 4008 | amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); |
4023 | amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ | 4009 | amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ |
4024 | WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ | 4010 | WAIT_REG_MEM_FUNCTION(3))); /* equal */ |
4025 | WAIT_REG_MEM_FUNCTION(3) | /* == */ | 4011 | amdgpu_ring_write(ring, addr & 0xfffffffc); |
4026 | WAIT_REG_MEM_ENGINE(2))); /* ce */ | 4012 | amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); |
4027 | amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); | 4013 | amdgpu_ring_write(ring, seq); |
4028 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); | ||
4029 | amdgpu_ring_write(ring, 1); | ||
4030 | amdgpu_ring_write(ring, 0xffffffff); | 4014 | amdgpu_ring_write(ring, 0xffffffff); |
4031 | amdgpu_ring_write(ring, 4); /* poll interval */ | 4015 | amdgpu_ring_write(ring, 4); /* poll interval */ |
4032 | 4016 | ||
4033 | /* instruct CE to reset wb of ce_sync to zero */ | 4017 | if (usepfp) { |
4034 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 4018 | /* sync CE with ME to prevent CE fetch CEIB before context switch done */ |
4035 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | | 4019 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); |
4036 | WRITE_DATA_DST_SEL(5) | | 4020 | amdgpu_ring_write(ring, 0); |
4037 | WR_CONFIRM)); | 4021 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); |
4038 | amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); | 4022 | amdgpu_ring_write(ring, 0); |
4039 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); | 4023 | } |
4040 | amdgpu_ring_write(ring, 0); | ||
4041 | } | ||
4042 | |||
4043 | static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | ||
4044 | unsigned vm_id, uint64_t pd_addr) | ||
4045 | { | ||
4046 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | ||
4047 | 4024 | ||
4048 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 4025 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
4049 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | | 4026 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
4050 | WRITE_DATA_DST_SEL(0))); | 4027 | WRITE_DATA_DST_SEL(0)) | |
4028 | WR_CONFIRM); | ||
4051 | if (vm_id < 8) { | 4029 | if (vm_id < 8) { |
4052 | amdgpu_ring_write(ring, | 4030 | amdgpu_ring_write(ring, |
4053 | (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); | 4031 | (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); |
@@ -4083,9 +4061,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
4083 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ | 4061 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ |
4084 | amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | 4062 | amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
4085 | amdgpu_ring_write(ring, 0x0); | 4063 | amdgpu_ring_write(ring, 0x0); |
4086 | 4064 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); | |
4087 | /* synce CE with ME to prevent CE fetch CEIB before context switch done */ | 4065 | amdgpu_ring_write(ring, 0); |
4088 | gfx_v8_0_ce_sync_me(ring); | 4066 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); |
4067 | amdgpu_ring_write(ring, 0); | ||
4089 | } | 4068 | } |
4090 | } | 4069 | } |
4091 | 4070 | ||
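Editorial note: gfx_v8_0 gets the same CE handshake removal, and the rewritten gfx_v8_0_ring_emit_vm_flush() additionally waits on the ring's own fence value before reprogramming the page-table base, so the flush cannot overtake still-pending submissions. The wait packet emitted above, annotated as a sketch:

/* Block the ring until the fence memory reaches the last synced
 * sequence number; only then is it safe to switch page tables. */
uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
uint64_t addr = ring->fence_drv.gpu_addr;

amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) |	/* poll memory */
			 WAIT_REG_MEM_FUNCTION(3)));	/* wait for equal */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);		/* reference value */
amdgpu_ring_write(ring, 0xffffffff);	/* compare mask */
amdgpu_ring_write(ring, 4);		/* poll interval */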
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 774528ab8704..fab5471d25d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, | |||
1262 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); | 1262 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); |
1263 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); | 1263 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); |
1264 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); | 1264 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); |
1265 | /* reset addr and status */ | ||
1266 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); | ||
1267 | |||
1268 | if (!addr && !status) | ||
1269 | return 0; | ||
1270 | |||
1265 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", | 1271 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", |
1266 | entry->src_id, entry->src_data); | 1272 | entry->src_id, entry->src_data); |
1267 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | 1273 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, | |||
1269 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | 1275 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
1270 | status); | 1276 | status); |
1271 | gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); | 1277 | gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); |
1272 | /* reset addr and status */ | ||
1273 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); | ||
1274 | 1278 | ||
1275 | return 0; | 1279 | return 0; |
1276 | } | 1280 | } |
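Editorial note: the gmc_v7_0 fault handler (and the identical gmc_v8_0 change below) now acknowledges the fault right after latching the registers and returns quietly when both the fault address and status read back as zero, which stops spurious interrupts from filling the log with empty decodes. The resulting flow, pieced together from the hunk:

addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);

/* ack the fault first so the next fault can be latched */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

/* nothing latched: treat it as spurious and skip the noisy decode */
if (!addr && !status)
	return 0;

dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
	entry->src_id, entry->src_data);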
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9a07742620d0..7bc9e9fcf3d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, | |||
1262 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); | 1262 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); |
1263 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); | 1263 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); |
1264 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); | 1264 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); |
1265 | /* reset addr and status */ | ||
1266 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); | ||
1267 | |||
1268 | if (!addr && !status) | ||
1269 | return 0; | ||
1270 | |||
1265 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", | 1271 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", |
1266 | entry->src_id, entry->src_data); | 1272 | entry->src_id, entry->src_data); |
1267 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | 1273 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, | |||
1269 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | 1275 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
1270 | status); | 1276 | status); |
1271 | gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); | 1277 | gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); |
1272 | /* reset addr and status */ | ||
1273 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); | ||
1274 | 1278 | ||
1275 | return 0; | 1279 | return 0; |
1276 | } | 1280 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index c900aa942ade..966d4b2ed9da 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c | |||
@@ -625,7 +625,7 @@ int iceland_smu_init(struct amdgpu_device *adev) | |||
625 | ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, | 625 | ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, |
626 | true, AMDGPU_GEM_DOMAIN_VRAM, | 626 | true, AMDGPU_GEM_DOMAIN_VRAM, |
627 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 627 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
628 | NULL, toc_buf); | 628 | NULL, NULL, toc_buf); |
629 | if (ret) { | 629 | if (ret) { |
630 | DRM_ERROR("Failed to allocate memory for TOC buffer\n"); | 630 | DRM_ERROR("Failed to allocate memory for TOC buffer\n"); |
631 | return -ENOMEM; | 631 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 94ec04a9c4d5..9745ed3a9aef 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
@@ -2995,6 +2995,12 @@ static int kv_dpm_late_init(void *handle) | |||
2995 | { | 2995 | { |
2996 | /* powerdown unused blocks for now */ | 2996 | /* powerdown unused blocks for now */ |
2997 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 2997 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2998 | int ret; | ||
2999 | |||
3000 | /* init the sysfs and debugfs files late */ | ||
3001 | ret = amdgpu_pm_sysfs_init(adev); | ||
3002 | if (ret) | ||
3003 | return ret; | ||
2998 | 3004 | ||
2999 | kv_dpm_powergate_acp(adev, true); | 3005 | kv_dpm_powergate_acp(adev, true); |
3000 | kv_dpm_powergate_samu(adev, true); | 3006 | kv_dpm_powergate_samu(adev, true); |
@@ -3038,9 +3044,6 @@ static int kv_dpm_sw_init(void *handle) | |||
3038 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; | 3044 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; |
3039 | if (amdgpu_dpm == 1) | 3045 | if (amdgpu_dpm == 1) |
3040 | amdgpu_pm_print_power_states(adev); | 3046 | amdgpu_pm_print_power_states(adev); |
3041 | ret = amdgpu_pm_sysfs_init(adev); | ||
3042 | if (ret) | ||
3043 | goto dpm_failed; | ||
3044 | mutex_unlock(&adev->pm.mutex); | 3047 | mutex_unlock(&adev->pm.mutex); |
3045 | DRM_INFO("amdgpu: dpm initialized\n"); | 3048 | DRM_INFO("amdgpu: dpm initialized\n"); |
3046 | 3049 | ||
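Editorial note: kv_dpm moves amdgpu_pm_sysfs_init() out of sw_init and into late_init, so the sysfs/debugfs power-management files are only registered once the rest of DPM bring-up has succeeded. A sketch of the resulting late_init, assembled from the hunks above (the remaining powergating calls in the function are omitted here):

static int kv_dpm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	/* create the PM sysfs/debugfs files late, after DPM is up */
	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		return ret;

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(adev, true);
	kv_dpm_powergate_samu(adev, true);
	/* ... further powergating calls not shown in this hunk ... */

	return 0;
}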
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 1f5ac941a610..5421309c1862 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c | |||
@@ -763,7 +763,7 @@ int tonga_smu_init(struct amdgpu_device *adev) | |||
763 | ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, | 763 | ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, |
764 | true, AMDGPU_GEM_DOMAIN_VRAM, | 764 | true, AMDGPU_GEM_DOMAIN_VRAM, |
765 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 765 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
766 | NULL, toc_buf); | 766 | NULL, NULL, toc_buf); |
767 | if (ret) { | 767 | if (ret) { |
768 | DRM_ERROR("Failed to allocate memory for TOC buffer\n"); | 768 | DRM_ERROR("Failed to allocate memory for TOC buffer\n"); |
769 | return -ENOMEM; | 769 | return -ENOMEM; |
@@ -773,7 +773,7 @@ int tonga_smu_init(struct amdgpu_device *adev) | |||
773 | ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, | 773 | ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, |
774 | true, AMDGPU_GEM_DOMAIN_VRAM, | 774 | true, AMDGPU_GEM_DOMAIN_VRAM, |
775 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 775 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
776 | NULL, smu_buf); | 776 | NULL, NULL, smu_buf); |
777 | if (ret) { | 777 | if (ret) { |
778 | DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); | 778 | DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); |
779 | return -ENOMEM; | 779 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 5fac5da694f0..ed50dd725788 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | |||
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle) | |||
224 | int r; | 224 | int r; |
225 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 225 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
226 | 226 | ||
227 | r = uvd_v4_2_hw_fini(adev); | 227 | r = amdgpu_uvd_suspend(adev); |
228 | if (r) | 228 | if (r) |
229 | return r; | 229 | return r; |
230 | 230 | ||
231 | r = amdgpu_uvd_suspend(adev); | 231 | r = uvd_v4_2_hw_fini(adev); |
232 | if (r) | 232 | if (r) |
233 | return r; | 233 | return r; |
234 | 234 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 2d5c59c318af..9ad8b9906c0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | |||
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle) | |||
220 | int r; | 220 | int r; |
221 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 221 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
222 | 222 | ||
223 | r = uvd_v5_0_hw_fini(adev); | 223 | r = amdgpu_uvd_suspend(adev); |
224 | if (r) | 224 | if (r) |
225 | return r; | 225 | return r; |
226 | 226 | ||
227 | r = amdgpu_uvd_suspend(adev); | 227 | r = uvd_v5_0_hw_fini(adev); |
228 | if (r) | 228 | if (r) |
229 | return r; | 229 | return r; |
230 | 230 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index d9f553fce531..7e9934fa4193 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
@@ -214,14 +214,16 @@ static int uvd_v6_0_suspend(void *handle) | |||
214 | int r; | 214 | int r; |
215 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 215 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
216 | 216 | ||
217 | /* Skip this for APU for now */ | ||
218 | if (!(adev->flags & AMD_IS_APU)) { | ||
219 | r = amdgpu_uvd_suspend(adev); | ||
220 | if (r) | ||
221 | return r; | ||
222 | } | ||
217 | r = uvd_v6_0_hw_fini(adev); | 223 | r = uvd_v6_0_hw_fini(adev); |
218 | if (r) | 224 | if (r) |
219 | return r; | 225 | return r; |
220 | 226 | ||
221 | r = amdgpu_uvd_suspend(adev); | ||
222 | if (r) | ||
223 | return r; | ||
224 | |||
225 | return r; | 227 | return r; |
226 | } | 228 | } |
227 | 229 | ||
@@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle) | |||
230 | int r; | 232 | int r; |
231 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 233 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
232 | 234 | ||
233 | r = amdgpu_uvd_resume(adev); | 235 | /* Skip this for APU for now */ |
234 | if (r) | 236 | if (!(adev->flags & AMD_IS_APU)) { |
235 | return r; | 237 | r = amdgpu_uvd_resume(adev); |
236 | 238 | if (r) | |
239 | return r; | ||
240 | } | ||
237 | r = uvd_v6_0_hw_init(adev); | 241 | r = uvd_v6_0_hw_init(adev); |
238 | if (r) | 242 | if (r) |
239 | return r; | 243 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 552d9e75ad1b..0bac8702e934 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
@@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev) | |||
1005 | u32 mask; | 1005 | u32 mask; |
1006 | int ret; | 1006 | int ret; |
1007 | 1007 | ||
1008 | if (pci_is_root_bus(adev->pdev->bus)) | ||
1009 | return; | ||
1010 | |||
1008 | if (amdgpu_pcie_gen2 == 0) | 1011 | if (amdgpu_pcie_gen2 == 0) |
1009 | return; | 1012 | return; |
1010 | 1013 | ||
@@ -1400,7 +1403,8 @@ static int vi_common_early_init(void *handle) | |||
1400 | case CHIP_CARRIZO: | 1403 | case CHIP_CARRIZO: |
1401 | adev->has_uvd = true; | 1404 | adev->has_uvd = true; |
1402 | adev->cg_flags = 0; | 1405 | adev->cg_flags = 0; |
1403 | adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE; | 1406 | /* Disable UVD pg */ |
1407 | adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE; | ||
1404 | adev->external_rev_id = adev->rev_id + 0x1; | 1408 | adev->external_rev_id = adev->rev_id + 0x1; |
1405 | if (amdgpu_smc_load_fw && smc_enabled) | 1409 | if (amdgpu_smc_load_fw && smc_enabled) |
1406 | adev->firmware.smu_load = true; | 1410 | adev->firmware.smu_load = true; |
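Editorial note: vi_pcie_gen3_enable() now bails out early when the GPU sits directly on a PCI root bus, since there is no upstream bridge whose link could be retrained; the same file also comments out UVD powergating on Carrizo for now. A short sketch of the guard as added above:

/* Nothing to retrain if there is no bridge above the device
 * (pci_is_root_bus() is true when bus->parent is NULL). */
if (pci_is_root_bus(adev->pdev->bus))
	return;

if (amdgpu_pcie_gen2 == 0)
	return;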
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h index 488642f08267..3b47ae313e36 100644 --- a/drivers/gpu/drm/amd/include/cgs_linux.h +++ b/drivers/gpu/drm/amd/include/cgs_linux.h | |||
@@ -27,19 +27,6 @@ | |||
27 | #include "cgs_common.h" | 27 | #include "cgs_common.h" |
28 | 28 | ||
29 | /** | 29 | /** |
30 | * cgs_import_gpu_mem() - Import dmabuf handle | ||
31 | * @cgs_device: opaque device handle | ||
32 | * @dmabuf_fd: DMABuf file descriptor | ||
33 | * @handle: memory handle (output) | ||
34 | * | ||
35 | * Must be called in the process context that dmabuf_fd belongs to. | ||
36 | * | ||
37 | * Return: 0 on success, -errno otherwise | ||
38 | */ | ||
39 | typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd, | ||
40 | cgs_handle_t *handle); | ||
41 | |||
42 | /** | ||
43 | * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources | 30 | * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources |
44 | * @private_data: private data provided to cgs_add_irq_source | 31 | * @private_data: private data provided to cgs_add_irq_source |
45 | * @src_id: interrupt source ID | 32 | * @src_id: interrupt source ID |
@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type); | |||
114 | typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); | 101 | typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); |
115 | 102 | ||
116 | struct cgs_os_ops { | 103 | struct cgs_os_ops { |
117 | cgs_import_gpu_mem_t import_gpu_mem; | ||
118 | |||
119 | /* IRQ handling */ | 104 | /* IRQ handling */ |
120 | cgs_add_irq_source_t add_irq_source; | 105 | cgs_add_irq_source_t add_irq_source; |
121 | cgs_irq_get_t irq_get; | 106 | cgs_irq_get_t irq_get; |
122 | cgs_irq_put_t irq_put; | 107 | cgs_irq_put_t irq_put; |
123 | }; | 108 | }; |
124 | 109 | ||
125 | #define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \ | ||
126 | CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle) | ||
127 | #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ | 110 | #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ |
128 | CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ | 111 | CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ |
129 | private_data) | 112 | private_data) |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h new file mode 100644 index 000000000000..144f50acc971 --- /dev/null +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _GPU_SCHED_TRACE_H_ | ||
3 | |||
4 | #include <linux/stringify.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <linux/tracepoint.h> | ||
7 | |||
8 | #include <drm/drmP.h> | ||
9 | |||
10 | #undef TRACE_SYSTEM | ||
11 | #define TRACE_SYSTEM gpu_sched | ||
12 | #define TRACE_INCLUDE_FILE gpu_sched_trace | ||
13 | |||
14 | TRACE_EVENT(amd_sched_job, | ||
15 | TP_PROTO(struct amd_sched_job *sched_job), | ||
16 | TP_ARGS(sched_job), | ||
17 | TP_STRUCT__entry( | ||
18 | __field(struct amd_sched_entity *, entity) | ||
19 | __field(const char *, name) | ||
20 | __field(u32, job_count) | ||
21 | __field(int, hw_job_count) | ||
22 | ), | ||
23 | |||
24 | TP_fast_assign( | ||
25 | __entry->entity = sched_job->s_entity; | ||
26 | __entry->name = sched_job->sched->name; | ||
27 | __entry->job_count = kfifo_len( | ||
28 | &sched_job->s_entity->job_queue) / sizeof(sched_job); | ||
29 | __entry->hw_job_count = atomic_read( | ||
30 | &sched_job->sched->hw_rq_count); | ||
31 | ), | ||
32 | TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", | ||
33 | __entry->entity, __entry->name, __entry->job_count, | ||
34 | __entry->hw_job_count) | ||
35 | ); | ||
36 | #endif | ||
37 | |||
38 | /* This part must be outside protection */ | ||
39 | #undef TRACE_INCLUDE_PATH | ||
40 | #define TRACE_INCLUDE_PATH . | ||
41 | #include <trace/define_trace.h> | ||
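Editorial note: gpu_sched_trace.h follows the standard Linux tracepoint header layout. TRACE_SYSTEM and TRACE_INCLUDE_FILE name the event group and file, and the trailing TRACE_INCLUDE_PATH/define_trace.h stanza lets exactly one compilation unit expand the event bodies. A hedged sketch of the consumer side, which is what the gpu_scheduler.c hunk below does:

/* In exactly one .c file, expand the tracepoint definitions ... */
#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

/* ... then emit the event from the submission path. */
trace_amd_sched_job(sched_job);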
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 9259f1b6664c..3697eeeecf82 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -27,6 +27,9 @@ | |||
27 | #include <drm/drmP.h> | 27 | #include <drm/drmP.h> |
28 | #include "gpu_scheduler.h" | 28 | #include "gpu_scheduler.h" |
29 | 29 | ||
30 | #define CREATE_TRACE_POINTS | ||
31 | #include "gpu_sched_trace.h" | ||
32 | |||
30 | static struct amd_sched_job * | 33 | static struct amd_sched_job * |
31 | amd_sched_entity_pop_job(struct amd_sched_entity *entity); | 34 | amd_sched_entity_pop_job(struct amd_sched_entity *entity); |
32 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); | 35 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); |
@@ -65,29 +68,29 @@ static struct amd_sched_job * | |||
65 | amd_sched_rq_select_job(struct amd_sched_rq *rq) | 68 | amd_sched_rq_select_job(struct amd_sched_rq *rq) |
66 | { | 69 | { |
67 | struct amd_sched_entity *entity; | 70 | struct amd_sched_entity *entity; |
68 | struct amd_sched_job *job; | 71 | struct amd_sched_job *sched_job; |
69 | 72 | ||
70 | spin_lock(&rq->lock); | 73 | spin_lock(&rq->lock); |
71 | 74 | ||
72 | entity = rq->current_entity; | 75 | entity = rq->current_entity; |
73 | if (entity) { | 76 | if (entity) { |
74 | list_for_each_entry_continue(entity, &rq->entities, list) { | 77 | list_for_each_entry_continue(entity, &rq->entities, list) { |
75 | job = amd_sched_entity_pop_job(entity); | 78 | sched_job = amd_sched_entity_pop_job(entity); |
76 | if (job) { | 79 | if (sched_job) { |
77 | rq->current_entity = entity; | 80 | rq->current_entity = entity; |
78 | spin_unlock(&rq->lock); | 81 | spin_unlock(&rq->lock); |
79 | return job; | 82 | return sched_job; |
80 | } | 83 | } |
81 | } | 84 | } |
82 | } | 85 | } |
83 | 86 | ||
84 | list_for_each_entry(entity, &rq->entities, list) { | 87 | list_for_each_entry(entity, &rq->entities, list) { |
85 | 88 | ||
86 | job = amd_sched_entity_pop_job(entity); | 89 | sched_job = amd_sched_entity_pop_job(entity); |
87 | if (job) { | 90 | if (sched_job) { |
88 | rq->current_entity = entity; | 91 | rq->current_entity = entity; |
89 | spin_unlock(&rq->lock); | 92 | spin_unlock(&rq->lock); |
90 | return job; | 93 | return sched_job; |
91 | } | 94 | } |
92 | 95 | ||
93 | if (entity == rq->current_entity) | 96 | if (entity == rq->current_entity) |
@@ -115,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, | |||
115 | struct amd_sched_rq *rq, | 118 | struct amd_sched_rq *rq, |
116 | uint32_t jobs) | 119 | uint32_t jobs) |
117 | { | 120 | { |
121 | int r; | ||
122 | |||
118 | if (!(sched && entity && rq)) | 123 | if (!(sched && entity && rq)) |
119 | return -EINVAL; | 124 | return -EINVAL; |
120 | 125 | ||
121 | memset(entity, 0, sizeof(struct amd_sched_entity)); | 126 | memset(entity, 0, sizeof(struct amd_sched_entity)); |
122 | entity->belongto_rq = rq; | 127 | INIT_LIST_HEAD(&entity->list); |
123 | entity->scheduler = sched; | 128 | entity->rq = rq; |
124 | entity->fence_context = fence_context_alloc(1); | 129 | entity->sched = sched; |
125 | if(kfifo_alloc(&entity->job_queue, | ||
126 | jobs * sizeof(void *), | ||
127 | GFP_KERNEL)) | ||
128 | return -EINVAL; | ||
129 | 130 | ||
130 | spin_lock_init(&entity->queue_lock); | 131 | spin_lock_init(&entity->queue_lock); |
132 | r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL); | ||
133 | if (r) | ||
134 | return r; | ||
135 | |||
131 | atomic_set(&entity->fence_seq, 0); | 136 | atomic_set(&entity->fence_seq, 0); |
137 | entity->fence_context = fence_context_alloc(1); | ||
132 | 138 | ||
133 | /* Add the entity to the run queue */ | 139 | /* Add the entity to the run queue */ |
134 | amd_sched_rq_add_entity(rq, entity); | 140 | amd_sched_rq_add_entity(rq, entity); |
141 | |||
135 | return 0; | 142 | return 0; |
136 | } | 143 | } |
137 | 144 | ||
@@ -146,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, | |||
146 | static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, | 153 | static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, |
147 | struct amd_sched_entity *entity) | 154 | struct amd_sched_entity *entity) |
148 | { | 155 | { |
149 | return entity->scheduler == sched && | 156 | return entity->sched == sched && |
150 | entity->belongto_rq != NULL; | 157 | entity->rq != NULL; |
151 | } | 158 | } |
152 | 159 | ||
153 | /** | 160 | /** |
@@ -177,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) | |||
177 | void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, | 184 | void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, |
178 | struct amd_sched_entity *entity) | 185 | struct amd_sched_entity *entity) |
179 | { | 186 | { |
180 | struct amd_sched_rq *rq = entity->belongto_rq; | 187 | struct amd_sched_rq *rq = entity->rq; |
181 | 188 | ||
182 | if (!amd_sched_entity_is_initialized(sched, entity)) | 189 | if (!amd_sched_entity_is_initialized(sched, entity)) |
183 | return; | 190 | return; |
@@ -198,22 +205,22 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) | |||
198 | container_of(cb, struct amd_sched_entity, cb); | 205 | container_of(cb, struct amd_sched_entity, cb); |
199 | entity->dependency = NULL; | 206 | entity->dependency = NULL; |
200 | fence_put(f); | 207 | fence_put(f); |
201 | amd_sched_wakeup(entity->scheduler); | 208 | amd_sched_wakeup(entity->sched); |
202 | } | 209 | } |
203 | 210 | ||
204 | static struct amd_sched_job * | 211 | static struct amd_sched_job * |
205 | amd_sched_entity_pop_job(struct amd_sched_entity *entity) | 212 | amd_sched_entity_pop_job(struct amd_sched_entity *entity) |
206 | { | 213 | { |
207 | struct amd_gpu_scheduler *sched = entity->scheduler; | 214 | struct amd_gpu_scheduler *sched = entity->sched; |
208 | struct amd_sched_job *job; | 215 | struct amd_sched_job *sched_job; |
209 | 216 | ||
210 | if (ACCESS_ONCE(entity->dependency)) | 217 | if (ACCESS_ONCE(entity->dependency)) |
211 | return NULL; | 218 | return NULL; |
212 | 219 | ||
213 | if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job))) | 220 | if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) |
214 | return NULL; | 221 | return NULL; |
215 | 222 | ||
216 | while ((entity->dependency = sched->ops->dependency(job))) { | 223 | while ((entity->dependency = sched->ops->dependency(sched_job))) { |
217 | 224 | ||
218 | if (fence_add_callback(entity->dependency, &entity->cb, | 225 | if (fence_add_callback(entity->dependency, &entity->cb, |
219 | amd_sched_entity_wakeup)) | 226 | amd_sched_entity_wakeup)) |
@@ -222,32 +229,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) | |||
222 | return NULL; | 229 | return NULL; |
223 | } | 230 | } |
224 | 231 | ||
225 | return job; | 232 | return sched_job; |
226 | } | 233 | } |
227 | 234 | ||
228 | /** | 235 | /** |
229 | * Helper to submit a job to the job queue | 236 | * Helper to submit a job to the job queue |
230 | * | 237 | * |
231 | * @job The pointer to job required to submit | 238 | * @sched_job The pointer to job required to submit |
232 | * | 239 | * |
233 | * Returns true if we could submit the job. | 240 | * Returns true if we could submit the job. |
234 | */ | 241 | */ |
235 | static bool amd_sched_entity_in(struct amd_sched_job *job) | 242 | static bool amd_sched_entity_in(struct amd_sched_job *sched_job) |
236 | { | 243 | { |
237 | struct amd_sched_entity *entity = job->s_entity; | 244 | struct amd_sched_entity *entity = sched_job->s_entity; |
238 | bool added, first = false; | 245 | bool added, first = false; |
239 | 246 | ||
240 | spin_lock(&entity->queue_lock); | 247 | spin_lock(&entity->queue_lock); |
241 | added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job); | 248 | added = kfifo_in(&entity->job_queue, &sched_job, |
249 | sizeof(sched_job)) == sizeof(sched_job); | ||
242 | 250 | ||
243 | if (added && kfifo_len(&entity->job_queue) == sizeof(job)) | 251 | if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job)) |
244 | first = true; | 252 | first = true; |
245 | 253 | ||
246 | spin_unlock(&entity->queue_lock); | 254 | spin_unlock(&entity->queue_lock); |
247 | 255 | ||
248 | /* first job wakes up scheduler */ | 256 | /* first job wakes up scheduler */ |
249 | if (first) | 257 | if (first) |
250 | amd_sched_wakeup(job->sched); | 258 | amd_sched_wakeup(sched_job->sched); |
251 | 259 | ||
252 | return added; | 260 | return added; |
253 | } | 261 | } |
@@ -255,7 +263,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job) | |||
255 | /** | 263 | /** |
256 | * Submit a job to the job queue | 264 | * Submit a job to the job queue |
257 | * | 265 | * |
258 | * @job The pointer to job required to submit | 266 | * @sched_job The pointer to job required to submit |
259 | * | 267 | * |
260 | * Returns 0 for success, negative error code otherwise. | 268 | * Returns 0 for success, negative error code otherwise. |
261 | */ | 269 | */ |
@@ -271,9 +279,9 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job) | |||
271 | fence_get(&fence->base); | 279 | fence_get(&fence->base); |
272 | sched_job->s_fence = fence; | 280 | sched_job->s_fence = fence; |
273 | 281 | ||
274 | wait_event(entity->scheduler->job_scheduled, | 282 | wait_event(entity->sched->job_scheduled, |
275 | amd_sched_entity_in(sched_job)); | 283 | amd_sched_entity_in(sched_job)); |
276 | 284 | trace_amd_sched_job(sched_job); | |
277 | return 0; | 285 | return 0; |
278 | } | 286 | } |
279 | 287 | ||
@@ -301,30 +309,28 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) | |||
301 | static struct amd_sched_job * | 309 | static struct amd_sched_job * |
302 | amd_sched_select_job(struct amd_gpu_scheduler *sched) | 310 | amd_sched_select_job(struct amd_gpu_scheduler *sched) |
303 | { | 311 | { |
304 | struct amd_sched_job *job; | 312 | struct amd_sched_job *sched_job; |
305 | 313 | ||
306 | if (!amd_sched_ready(sched)) | 314 | if (!amd_sched_ready(sched)) |
307 | return NULL; | 315 | return NULL; |
308 | 316 | ||
309 | /* Kernel run queue has higher priority than normal run queue*/ | 317 | /* Kernel run queue has higher priority than normal run queue*/ |
310 | job = amd_sched_rq_select_job(&sched->kernel_rq); | 318 | sched_job = amd_sched_rq_select_job(&sched->kernel_rq); |
311 | if (job == NULL) | 319 | if (sched_job == NULL) |
312 | job = amd_sched_rq_select_job(&sched->sched_rq); | 320 | sched_job = amd_sched_rq_select_job(&sched->sched_rq); |
313 | 321 | ||
314 | return job; | 322 | return sched_job; |
315 | } | 323 | } |
316 | 324 | ||
317 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) | 325 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) |
318 | { | 326 | { |
319 | struct amd_sched_job *sched_job = | 327 | struct amd_sched_fence *s_fence = |
320 | container_of(cb, struct amd_sched_job, cb); | 328 | container_of(cb, struct amd_sched_fence, cb); |
321 | struct amd_gpu_scheduler *sched; | 329 | struct amd_gpu_scheduler *sched = s_fence->sched; |
322 | 330 | ||
323 | sched = sched_job->sched; | ||
324 | amd_sched_fence_signal(sched_job->s_fence); | ||
325 | atomic_dec(&sched->hw_rq_count); | 331 | atomic_dec(&sched->hw_rq_count); |
326 | fence_put(&sched_job->s_fence->base); | 332 | amd_sched_fence_signal(s_fence); |
327 | sched->ops->process_job(sched_job); | 333 | fence_put(&s_fence->base); |
328 | wake_up_interruptible(&sched->wake_up_worker); | 334 | wake_up_interruptible(&sched->wake_up_worker); |
329 | } | 335 | } |
330 | 336 | ||
@@ -338,87 +344,82 @@ static int amd_sched_main(void *param) | |||
338 | 344 | ||
339 | while (!kthread_should_stop()) { | 345 | while (!kthread_should_stop()) { |
340 | struct amd_sched_entity *entity; | 346 | struct amd_sched_entity *entity; |
341 | struct amd_sched_job *job; | 347 | struct amd_sched_fence *s_fence; |
348 | struct amd_sched_job *sched_job; | ||
342 | struct fence *fence; | 349 | struct fence *fence; |
343 | 350 | ||
344 | wait_event_interruptible(sched->wake_up_worker, | 351 | wait_event_interruptible(sched->wake_up_worker, |
345 | kthread_should_stop() || | 352 | kthread_should_stop() || |
346 | (job = amd_sched_select_job(sched))); | 353 | (sched_job = amd_sched_select_job(sched))); |
347 | 354 | ||
348 | if (!job) | 355 | if (!sched_job) |
349 | continue; | 356 | continue; |
350 | 357 | ||
351 | entity = job->s_entity; | 358 | entity = sched_job->s_entity; |
359 | s_fence = sched_job->s_fence; | ||
352 | atomic_inc(&sched->hw_rq_count); | 360 | atomic_inc(&sched->hw_rq_count); |
353 | fence = sched->ops->run_job(job); | 361 | fence = sched->ops->run_job(sched_job); |
354 | if (fence) { | 362 | if (fence) { |
355 | r = fence_add_callback(fence, &job->cb, | 363 | r = fence_add_callback(fence, &s_fence->cb, |
356 | amd_sched_process_job); | 364 | amd_sched_process_job); |
357 | if (r == -ENOENT) | 365 | if (r == -ENOENT) |
358 | amd_sched_process_job(fence, &job->cb); | 366 | amd_sched_process_job(fence, &s_fence->cb); |
359 | else if (r) | 367 | else if (r) |
360 | DRM_ERROR("fence add callback failed (%d)\n", r); | 368 | DRM_ERROR("fence add callback failed (%d)\n", r); |
361 | fence_put(fence); | 369 | fence_put(fence); |
370 | } else { | ||
371 | DRM_ERROR("Failed to run job!\n"); | ||
372 | amd_sched_process_job(NULL, &s_fence->cb); | ||
362 | } | 373 | } |
363 | 374 | ||
364 | count = kfifo_out(&entity->job_queue, &job, sizeof(job)); | 375 | count = kfifo_out(&entity->job_queue, &sched_job, |
365 | WARN_ON(count != sizeof(job)); | 376 | sizeof(sched_job)); |
377 | WARN_ON(count != sizeof(sched_job)); | ||
366 | wake_up(&sched->job_scheduled); | 378 | wake_up(&sched->job_scheduled); |
367 | } | 379 | } |
368 | return 0; | 380 | return 0; |
369 | } | 381 | } |
370 | 382 | ||
371 | /** | 383 | /** |
372 | * Create a gpu scheduler | 384 | * Init a gpu scheduler instance |
373 | * | 385 | * |
386 | * @sched The pointer to the scheduler | ||
374 | * @ops The backend operations for this scheduler. | 387 | * @ops The backend operations for this scheduler. |
375 | * @ring The the ring id for the scheduler. | ||
376 | * @hw_submissions Number of hw submissions to do. | 388 | * @hw_submissions Number of hw submissions to do. |
389 | * @name Name used for debugging | ||
377 | * | 390 | * |
378 | * Return the pointer to scheduler for success, otherwise return NULL | 391 | * Return 0 on success, otherwise error code. |
379 | */ | 392 | */ |
380 | struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops, | 393 | int amd_sched_init(struct amd_gpu_scheduler *sched, |
381 | unsigned ring, unsigned hw_submission, | 394 | struct amd_sched_backend_ops *ops, |
382 | void *priv) | 395 | unsigned hw_submission, const char *name) |
383 | { | 396 | { |
384 | struct amd_gpu_scheduler *sched; | ||
385 | |||
386 | sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL); | ||
387 | if (!sched) | ||
388 | return NULL; | ||
389 | |||
390 | sched->ops = ops; | 397 | sched->ops = ops; |
391 | sched->ring_id = ring; | ||
392 | sched->hw_submission_limit = hw_submission; | 398 | sched->hw_submission_limit = hw_submission; |
393 | sched->priv = priv; | 399 | sched->name = name; |
394 | snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring); | ||
395 | amd_sched_rq_init(&sched->sched_rq); | 400 | amd_sched_rq_init(&sched->sched_rq); |
396 | amd_sched_rq_init(&sched->kernel_rq); | 401 | amd_sched_rq_init(&sched->kernel_rq); |
397 | 402 | ||
398 | init_waitqueue_head(&sched->wake_up_worker); | 403 | init_waitqueue_head(&sched->wake_up_worker); |
399 | init_waitqueue_head(&sched->job_scheduled); | 404 | init_waitqueue_head(&sched->job_scheduled); |
400 | atomic_set(&sched->hw_rq_count, 0); | 405 | atomic_set(&sched->hw_rq_count, 0); |
406 | |||
401 | /* Each scheduler will run on a separate kernel thread */ | 407 | /* Each scheduler will run on a separate kernel thread */ |
402 | sched->thread = kthread_run(amd_sched_main, sched, sched->name); | 408 | sched->thread = kthread_run(amd_sched_main, sched, sched->name); |
403 | if (IS_ERR(sched->thread)) { | 409 | if (IS_ERR(sched->thread)) { |
404 | DRM_ERROR("Failed to create scheduler for id %d.\n", ring); | 410 | DRM_ERROR("Failed to create scheduler for %s.\n", name); |
405 | kfree(sched); | 411 | return PTR_ERR(sched->thread); |
406 | return NULL; | ||
407 | } | 412 | } |
408 | 413 | ||
409 | return sched; | 414 | return 0; |
410 | } | 415 | } |
411 | 416 | ||
412 | /** | 417 | /** |
413 | * Destroy a gpu scheduler | 418 | * Destroy a gpu scheduler |
414 | * | 419 | * |
415 | * @sched The pointer to the scheduler | 420 | * @sched The pointer to the scheduler |
416 | * | ||
417 | * return 0 if succeed. -1 if failed. | ||
418 | */ | 421 | */ |
419 | int amd_sched_destroy(struct amd_gpu_scheduler *sched) | 422 | void amd_sched_fini(struct amd_gpu_scheduler *sched) |
420 | { | 423 | { |
421 | kthread_stop(sched->thread); | 424 | kthread_stop(sched->thread); |
422 | kfree(sched); | ||
423 | return 0; | ||
424 | } | 425 | } |
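The hunk above replaces the heap-allocating amd_sched_create()/amd_sched_destroy() pair with amd_sched_init()/amd_sched_fini(), which operate on a scheduler embedded in the caller's structure and identified by a name string instead of a ring id. A minimal sketch of how a driver-side ring might adopt the new entry points; the ring structure, queue depth and ops variable are illustrative assumptions, not taken from the patch:

struct my_ring {
	struct amd_gpu_scheduler sched;	/* embedded, no longer allocated by the core */
	/* ... other per-ring state ... */
};

static int my_ring_init(struct my_ring *ring, struct amd_sched_backend_ops *ops)
{
	/* old flow: ring->sched = amd_sched_create(ops, ring_id, 16, priv); */
	return amd_sched_init(&ring->sched, ops, 16, "my-ring");
}

static void my_ring_fini(struct my_ring *ring)
{
	/* old flow: amd_sched_destroy(ring->sched); */
	amd_sched_fini(&ring->sched);
}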
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 2af0e4d4d817..80b64dc22214 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | |||
@@ -38,13 +38,15 @@ struct amd_sched_rq; | |||
38 | */ | 38 | */ |
39 | struct amd_sched_entity { | 39 | struct amd_sched_entity { |
40 | struct list_head list; | 40 | struct list_head list; |
41 | struct amd_sched_rq *belongto_rq; | 41 | struct amd_sched_rq *rq; |
42 | atomic_t fence_seq; | 42 | struct amd_gpu_scheduler *sched; |
43 | /* the job_queue maintains the jobs submitted by clients */ | 43 | |
44 | struct kfifo job_queue; | ||
45 | spinlock_t queue_lock; | 44 | spinlock_t queue_lock; |
46 | struct amd_gpu_scheduler *scheduler; | 45 | struct kfifo job_queue; |
46 | |||
47 | atomic_t fence_seq; | ||
47 | uint64_t fence_context; | 48 | uint64_t fence_context; |
49 | |||
48 | struct fence *dependency; | 50 | struct fence *dependency; |
49 | struct fence_cb cb; | 51 | struct fence_cb cb; |
50 | }; | 52 | }; |
@@ -62,13 +64,13 @@ struct amd_sched_rq { | |||
62 | 64 | ||
63 | struct amd_sched_fence { | 65 | struct amd_sched_fence { |
64 | struct fence base; | 66 | struct fence base; |
65 | struct amd_gpu_scheduler *scheduler; | 67 | struct fence_cb cb; |
68 | struct amd_gpu_scheduler *sched; | ||
66 | spinlock_t lock; | 69 | spinlock_t lock; |
67 | void *owner; | 70 | void *owner; |
68 | }; | 71 | }; |
69 | 72 | ||
70 | struct amd_sched_job { | 73 | struct amd_sched_job { |
71 | struct fence_cb cb; | ||
72 | struct amd_gpu_scheduler *sched; | 74 | struct amd_gpu_scheduler *sched; |
73 | struct amd_sched_entity *s_entity; | 75 | struct amd_sched_entity *s_entity; |
74 | struct amd_sched_fence *s_fence; | 76 | struct amd_sched_fence *s_fence; |
@@ -91,32 +93,29 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) | |||
91 | * these functions should be implemented in driver side | 93 | * these functions should be implemented in driver side |
92 | */ | 94 | */ |
93 | struct amd_sched_backend_ops { | 95 | struct amd_sched_backend_ops { |
94 | struct fence *(*dependency)(struct amd_sched_job *job); | 96 | struct fence *(*dependency)(struct amd_sched_job *sched_job); |
95 | struct fence *(*run_job)(struct amd_sched_job *job); | 97 | struct fence *(*run_job)(struct amd_sched_job *sched_job); |
96 | void (*process_job)(struct amd_sched_job *job); | ||
97 | }; | 98 | }; |
98 | 99 | ||
99 | /** | 100 | /** |
100 | * One scheduler is implemented for each hardware ring | 101 | * One scheduler is implemented for each hardware ring |
101 | */ | 102 | */ |
102 | struct amd_gpu_scheduler { | 103 | struct amd_gpu_scheduler { |
103 | struct task_struct *thread; | 104 | struct amd_sched_backend_ops *ops; |
105 | uint32_t hw_submission_limit; | ||
106 | const char *name; | ||
104 | struct amd_sched_rq sched_rq; | 107 | struct amd_sched_rq sched_rq; |
105 | struct amd_sched_rq kernel_rq; | 108 | struct amd_sched_rq kernel_rq; |
106 | atomic_t hw_rq_count; | ||
107 | struct amd_sched_backend_ops *ops; | ||
108 | uint32_t ring_id; | ||
109 | wait_queue_head_t wake_up_worker; | 109 | wait_queue_head_t wake_up_worker; |
110 | wait_queue_head_t job_scheduled; | 110 | wait_queue_head_t job_scheduled; |
111 | uint32_t hw_submission_limit; | 111 | atomic_t hw_rq_count; |
112 | char name[20]; | 112 | struct task_struct *thread; |
113 | void *priv; | ||
114 | }; | 113 | }; |
115 | 114 | ||
116 | struct amd_gpu_scheduler * | 115 | int amd_sched_init(struct amd_gpu_scheduler *sched, |
117 | amd_sched_create(struct amd_sched_backend_ops *ops, | 116 | struct amd_sched_backend_ops *ops, |
118 | uint32_t ring, uint32_t hw_submission, void *priv); | 117 | uint32_t hw_submission, const char *name); |
119 | int amd_sched_destroy(struct amd_gpu_scheduler *sched); | 118 | void amd_sched_fini(struct amd_gpu_scheduler *sched); |
120 | 119 | ||
121 | int amd_sched_entity_init(struct amd_gpu_scheduler *sched, | 120 | int amd_sched_entity_init(struct amd_gpu_scheduler *sched, |
122 | struct amd_sched_entity *entity, | 121 | struct amd_sched_entity *entity, |
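With amd_sched_backend_ops trimmed above to just ->dependency() and ->run_job() (the old ->process_job() hook is removed in this hunk), a backend implementation would now look roughly like the sketch below; the callback bodies and my_hw_submit() are hypothetical:

static struct fence *my_hw_submit(struct amd_sched_job *sched_job);	/* hypothetical HW path */

static struct fence *my_dependency(struct amd_sched_job *sched_job)
{
	return NULL;	/* nothing to wait for before this job may run */
}

static struct fence *my_run_job(struct amd_sched_job *sched_job)
{
	/* push the job to hardware and hand back its fence; completion is
	 * tracked through that fence rather than a process_job() callback */
	return my_hw_submit(sched_job);
}

static struct amd_sched_backend_ops my_ops = {
	.dependency = my_dependency,
	.run_job    = my_run_job,
};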
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index e62c37920e11..d802638094f4 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c | |||
@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity | |||
36 | if (fence == NULL) | 36 | if (fence == NULL) |
37 | return NULL; | 37 | return NULL; |
38 | fence->owner = owner; | 38 | fence->owner = owner; |
39 | fence->scheduler = s_entity->scheduler; | 39 | fence->sched = s_entity->sched; |
40 | spin_lock_init(&fence->lock); | 40 | spin_lock_init(&fence->lock); |
41 | 41 | ||
42 | seq = atomic_inc_return(&s_entity->fence_seq); | 42 | seq = atomic_inc_return(&s_entity->fence_seq); |
@@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence) | |||
63 | static const char *amd_sched_fence_get_timeline_name(struct fence *f) | 63 | static const char *amd_sched_fence_get_timeline_name(struct fence *f) |
64 | { | 64 | { |
65 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | 65 | struct amd_sched_fence *fence = to_amd_sched_fence(f); |
66 | return (const char *)fence->scheduler->name; | 66 | return (const char *)fence->sched->name; |
67 | } | 67 | } |
68 | 68 | ||
69 | static bool amd_sched_fence_enable_signaling(struct fence *f) | 69 | static bool amd_sched_fence_enable_signaling(struct fence *f) |
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e23df5fd3836..5bca390d9ae2 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
@@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, | |||
53 | struct drm_dp_mst_port *port, | 53 | struct drm_dp_mst_port *port, |
54 | int offset, int size, u8 *bytes); | 54 | int offset, int size, u8 *bytes); |
55 | 55 | ||
56 | static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, | 56 | static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, |
57 | struct drm_dp_mst_branch *mstb); | 57 | struct drm_dp_mst_branch *mstb); |
58 | static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, | 58 | static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, |
59 | struct drm_dp_mst_branch *mstb, | 59 | struct drm_dp_mst_branch *mstb, |
60 | struct drm_dp_mst_port *port); | 60 | struct drm_dp_mst_port *port); |
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) | |||
804 | struct drm_dp_mst_port *port, *tmp; | 804 | struct drm_dp_mst_port *port, *tmp; |
805 | bool wake_tx = false; | 805 | bool wake_tx = false; |
806 | 806 | ||
807 | cancel_work_sync(&mstb->mgr->work); | ||
808 | |||
809 | /* | 807 | /* |
810 | * destroy all ports - don't need lock | 808 | * destroy all ports - don't need lock |
811 | * as there are no more references to the mst branch | 809 | * as there are no more references to the mst branch |
@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref) | |||
863 | { | 861 | { |
864 | struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); | 862 | struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); |
865 | struct drm_dp_mst_topology_mgr *mgr = port->mgr; | 863 | struct drm_dp_mst_topology_mgr *mgr = port->mgr; |
864 | |||
866 | if (!port->input) { | 865 | if (!port->input) { |
867 | port->vcpi.num_slots = 0; | 866 | port->vcpi.num_slots = 0; |
868 | 867 | ||
869 | kfree(port->cached_edid); | 868 | kfree(port->cached_edid); |
870 | 869 | ||
871 | /* we can't destroy the connector here, as | 870 | /* |
872 | we might be holding the mode_config.mutex | 871 | * The only time we don't have a connector |
873 | from an EDID retrieval */ | 872 | * on an output port is if the connector init |
873 | * fails. | ||
874 | */ | ||
874 | if (port->connector) { | 875 | if (port->connector) { |
876 | /* we can't destroy the connector here, as | ||
877 | * we might be holding the mode_config.mutex | ||
878 | * from an EDID retrieval */ | ||
879 | |||
875 | mutex_lock(&mgr->destroy_connector_lock); | 880 | mutex_lock(&mgr->destroy_connector_lock); |
876 | list_add(&port->next, &mgr->destroy_connector_list); | 881 | list_add(&port->next, &mgr->destroy_connector_list); |
877 | mutex_unlock(&mgr->destroy_connector_lock); | 882 | mutex_unlock(&mgr->destroy_connector_lock); |
878 | schedule_work(&mgr->destroy_connector_work); | 883 | schedule_work(&mgr->destroy_connector_work); |
879 | return; | 884 | return; |
880 | } | 885 | } |
886 | /* no need to clean up vcpi | ||
887 | * as if we have no connector we never setup a vcpi */ | ||
881 | drm_dp_port_teardown_pdt(port, port->pdt); | 888 | drm_dp_port_teardown_pdt(port, port->pdt); |
882 | |||
883 | if (!port->input && port->vcpi.vcpi > 0) | ||
884 | drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); | ||
885 | } | 889 | } |
886 | kfree(port); | 890 | kfree(port); |
887 | |||
888 | (*mgr->cbs->hotplug)(mgr); | ||
889 | } | 891 | } |
890 | 892 | ||
891 | static void drm_dp_put_port(struct drm_dp_mst_port *port) | 893 | static void drm_dp_put_port(struct drm_dp_mst_port *port) |
@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, | |||
1027 | } | 1029 | } |
1028 | } | 1030 | } |
1029 | 1031 | ||
1030 | static void build_mst_prop_path(struct drm_dp_mst_port *port, | 1032 | static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, |
1031 | struct drm_dp_mst_branch *mstb, | 1033 | int pnum, |
1032 | char *proppath, | 1034 | char *proppath, |
1033 | size_t proppath_size) | 1035 | size_t proppath_size) |
1034 | { | 1036 | { |
@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port, | |||
1041 | snprintf(temp, sizeof(temp), "-%d", port_num); | 1043 | snprintf(temp, sizeof(temp), "-%d", port_num); |
1042 | strlcat(proppath, temp, proppath_size); | 1044 | strlcat(proppath, temp, proppath_size); |
1043 | } | 1045 | } |
1044 | snprintf(temp, sizeof(temp), "-%d", port->port_num); | 1046 | snprintf(temp, sizeof(temp), "-%d", pnum); |
1045 | strlcat(proppath, temp, proppath_size); | 1047 | strlcat(proppath, temp, proppath_size); |
1046 | } | 1048 | } |
1047 | 1049 | ||
@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, | |||
1105 | drm_dp_port_teardown_pdt(port, old_pdt); | 1107 | drm_dp_port_teardown_pdt(port, old_pdt); |
1106 | 1108 | ||
1107 | ret = drm_dp_port_setup_pdt(port); | 1109 | ret = drm_dp_port_setup_pdt(port); |
1108 | if (ret == true) { | 1110 | if (ret == true) |
1109 | drm_dp_send_link_address(mstb->mgr, port->mstb); | 1111 | drm_dp_send_link_address(mstb->mgr, port->mstb); |
1110 | port->mstb->link_address_sent = true; | ||
1111 | } | ||
1112 | } | 1112 | } |
1113 | 1113 | ||
1114 | if (created && !port->input) { | 1114 | if (created && !port->input) { |
1115 | char proppath[255]; | 1115 | char proppath[255]; |
1116 | build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); | ||
1117 | port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); | ||
1118 | 1116 | ||
1119 | if (port->port_num >= 8) { | 1117 | build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); |
1118 | port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); | ||
1119 | if (!port->connector) { | ||
1120 | /* remove it from the port list */ | ||
1121 | mutex_lock(&mstb->mgr->lock); | ||
1122 | list_del(&port->next); | ||
1123 | mutex_unlock(&mstb->mgr->lock); | ||
1124 | /* drop port list reference */ | ||
1125 | drm_dp_put_port(port); | ||
1126 | goto out; | ||
1127 | } | ||
1128 | if (port->port_num >= DP_MST_LOGICAL_PORT_0) { | ||
1120 | port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); | 1129 | port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); |
1130 | drm_mode_connector_set_tile_property(port->connector); | ||
1121 | } | 1131 | } |
1132 | (*mstb->mgr->cbs->register_connector)(port->connector); | ||
1122 | } | 1133 | } |
1123 | 1134 | ||
1135 | out: | ||
1124 | /* put reference to this port */ | 1136 | /* put reference to this port */ |
1125 | drm_dp_put_port(port); | 1137 | drm_dp_put_port(port); |
1126 | } | 1138 | } |
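The drm_dp_add_port() rework above handles connector-creation failure by unlinking the port, and on success defers registration to a new ->register_connector() callback after the optional EDID caching for logical ports. A rough sketch of how a driver's MST callbacks split under this scheme; the function names and funcs table are assumptions for illustration, and error handling is trimmed:

static const struct drm_connector_funcs my_connector_funcs;	/* hypothetical funcs table */

static struct drm_connector *
my_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
		     struct drm_dp_mst_port *port, const char *path)
{
	struct drm_connector *connector;

	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
	if (!connector)
		return NULL;	/* the core now unlinks the port on NULL, per the hunk above */

	drm_connector_init(mgr->dev, connector, &my_connector_funcs,
			   DRM_MODE_CONNECTOR_DisplayPort);
	/* create only; registering here could deadlock against mode_config locks */
	return connector;
}

static void my_mst_register_connector(struct drm_connector *connector)
{
	drm_connector_register(connector);	/* sysfs/debugfs exposure happens here */
}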
@@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m | |||
1202 | { | 1214 | { |
1203 | struct drm_dp_mst_port *port; | 1215 | struct drm_dp_mst_port *port; |
1204 | struct drm_dp_mst_branch *mstb_child; | 1216 | struct drm_dp_mst_branch *mstb_child; |
1205 | if (!mstb->link_address_sent) { | 1217 | if (!mstb->link_address_sent) |
1206 | drm_dp_send_link_address(mgr, mstb); | 1218 | drm_dp_send_link_address(mgr, mstb); |
1207 | mstb->link_address_sent = true; | 1219 | |
1208 | } | ||
1209 | list_for_each_entry(port, &mstb->ports, next) { | 1220 | list_for_each_entry(port, &mstb->ports, next) { |
1210 | if (port->input) | 1221 | if (port->input) |
1211 | continue; | 1222 | continue; |
@@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, | |||
1458 | mutex_unlock(&mgr->qlock); | 1469 | mutex_unlock(&mgr->qlock); |
1459 | } | 1470 | } |
1460 | 1471 | ||
1461 | static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, | 1472 | static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, |
1462 | struct drm_dp_mst_branch *mstb) | 1473 | struct drm_dp_mst_branch *mstb) |
1463 | { | 1474 | { |
1464 | int len; | 1475 | int len; |
1465 | struct drm_dp_sideband_msg_tx *txmsg; | 1476 | struct drm_dp_sideband_msg_tx *txmsg; |
@@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, | |||
1467 | 1478 | ||
1468 | txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); | 1479 | txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); |
1469 | if (!txmsg) | 1480 | if (!txmsg) |
1470 | return -ENOMEM; | 1481 | return; |
1471 | 1482 | ||
1472 | txmsg->dst = mstb; | 1483 | txmsg->dst = mstb; |
1473 | len = build_link_address(txmsg); | 1484 | len = build_link_address(txmsg); |
1474 | 1485 | ||
1486 | mstb->link_address_sent = true; | ||
1475 | drm_dp_queue_down_tx(mgr, txmsg); | 1487 | drm_dp_queue_down_tx(mgr, txmsg); |
1476 | 1488 | ||
1477 | ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); | 1489 | ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); |
@@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, | |||
1499 | } | 1511 | } |
1500 | (*mgr->cbs->hotplug)(mgr); | 1512 | (*mgr->cbs->hotplug)(mgr); |
1501 | } | 1513 | } |
1502 | } else | 1514 | } else { |
1515 | mstb->link_address_sent = false; | ||
1503 | DRM_DEBUG_KMS("link address failed %d\n", ret); | 1516 | DRM_DEBUG_KMS("link address failed %d\n", ret); |
1517 | } | ||
1504 | 1518 | ||
1505 | kfree(txmsg); | 1519 | kfree(txmsg); |
1506 | return 0; | ||
1507 | } | 1520 | } |
1508 | 1521 | ||
1509 | static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, | 1522 | static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, |
@@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) | |||
1978 | drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, | 1991 | drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, |
1979 | DP_MST_EN | DP_UPSTREAM_IS_SRC); | 1992 | DP_MST_EN | DP_UPSTREAM_IS_SRC); |
1980 | mutex_unlock(&mgr->lock); | 1993 | mutex_unlock(&mgr->lock); |
1994 | flush_work(&mgr->work); | ||
1995 | flush_work(&mgr->destroy_connector_work); | ||
1981 | } | 1996 | } |
1982 | EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); | 1997 | EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); |
1983 | 1998 | ||
@@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ | |||
2263 | 2278 | ||
2264 | if (port->cached_edid) | 2279 | if (port->cached_edid) |
2265 | edid = drm_edid_duplicate(port->cached_edid); | 2280 | edid = drm_edid_duplicate(port->cached_edid); |
2266 | else | 2281 | else { |
2267 | edid = drm_get_edid(connector, &port->aux.ddc); | 2282 | edid = drm_get_edid(connector, &port->aux.ddc); |
2268 | 2283 | drm_mode_connector_set_tile_property(connector); | |
2269 | drm_mode_connector_set_tile_property(connector); | 2284 | } |
2270 | drm_dp_put_port(port); | 2285 | drm_dp_put_port(port); |
2271 | return edid; | 2286 | return edid; |
2272 | } | 2287 | } |
@@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) | |||
2671 | { | 2686 | { |
2672 | struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); | 2687 | struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); |
2673 | struct drm_dp_mst_port *port; | 2688 | struct drm_dp_mst_port *port; |
2674 | 2689 | bool send_hotplug = false; | |
2675 | /* | 2690 | /* |
2676 | * Not a regular list traverse as we have to drop the destroy | 2691 | * Not a regular list traverse as we have to drop the destroy |
2677 | * connector lock before destroying the connector, to avoid AB->BA | 2692 | * connector lock before destroying the connector, to avoid AB->BA |
@@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) | |||
2694 | if (!port->input && port->vcpi.vcpi > 0) | 2709 | if (!port->input && port->vcpi.vcpi > 0) |
2695 | drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); | 2710 | drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); |
2696 | kfree(port); | 2711 | kfree(port); |
2712 | send_hotplug = true; | ||
2697 | } | 2713 | } |
2714 | if (send_hotplug) | ||
2715 | (*mgr->cbs->hotplug)(mgr); | ||
2698 | } | 2716 | } |
2699 | 2717 | ||
2700 | /** | 2718 | /** |
@@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); | |||
2747 | */ | 2765 | */ |
2748 | void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) | 2766 | void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) |
2749 | { | 2767 | { |
2768 | flush_work(&mgr->work); | ||
2750 | flush_work(&mgr->destroy_connector_work); | 2769 | flush_work(&mgr->destroy_connector_work); |
2751 | mutex_lock(&mgr->payload_lock); | 2770 | mutex_lock(&mgr->payload_lock); |
2752 | kfree(mgr->payloads); | 2771 | kfree(mgr->payloads); |
@@ -2782,12 +2801,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs | |||
2782 | if (msgs[num - 1].flags & I2C_M_RD) | 2801 | if (msgs[num - 1].flags & I2C_M_RD) |
2783 | reading = true; | 2802 | reading = true; |
2784 | 2803 | ||
2785 | if (!reading) { | 2804 | if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) { |
2786 | DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); | 2805 | DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); |
2787 | ret = -EIO; | 2806 | ret = -EIO; |
2788 | goto out; | 2807 | goto out; |
2789 | } | 2808 | } |
2790 | 2809 | ||
2810 | memset(&msg, 0, sizeof(msg)); | ||
2791 | msg.req_type = DP_REMOTE_I2C_READ; | 2811 | msg.req_type = DP_REMOTE_I2C_READ; |
2792 | msg.u.i2c_read.num_transactions = num - 1; | 2812 | msg.u.i2c_read.num_transactions = num - 1; |
2793 | msg.u.i2c_read.port_number = port->port_num; | 2813 | msg.u.i2c_read.port_number = port->port_num; |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 418d299f3b12..ca08c472311b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) | |||
345 | struct drm_crtc *crtc = mode_set->crtc; | 345 | struct drm_crtc *crtc = mode_set->crtc; |
346 | int ret; | 346 | int ret; |
347 | 347 | ||
348 | if (crtc->funcs->cursor_set) { | 348 | if (crtc->funcs->cursor_set2) { |
349 | ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); | ||
350 | if (ret) | ||
351 | error = true; | ||
352 | } else if (crtc->funcs->cursor_set) { | ||
349 | ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); | 353 | ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); |
350 | if (ret) | 354 | if (ret) |
351 | error = true; | 355 | error = true; |
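restore_fbdev_mode() above now prefers the hot-spot-aware cursor_set2 hook when a driver exposes one, falling back to cursor_set otherwise. A trimmed sketch of a CRTC funcs table that would take the new branch; the callback itself is a placeholder:

static const struct drm_crtc_funcs my_crtc_funcs = {
	/* cursor_set2(crtc, file_priv, handle, width, height, hot_x, hot_y);
	 * the helper passes a zero handle to hide the cursor on restore */
	.cursor_set2 = my_cursor_set2,
	.set_config  = drm_crtc_helper_set_config,
	.destroy     = drm_crtc_cleanup,
};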
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 9a860ca1e9d7..d93e7378c077 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c | |||
@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit); | |||
520 | 520 | ||
521 | /** Ioctl table */ | 521 | /** Ioctl table */ |
522 | static const struct drm_ioctl_desc drm_ioctls[] = { | 522 | static const struct drm_ioctl_desc drm_ioctls[] = { |
523 | DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), | 523 | DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, |
524 | DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), | ||
524 | DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), | 525 | DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), |
525 | DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), | 526 | DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), |
526 | DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), | 527 | DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), |
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d734780b31c0..a18164f2f6d2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c | |||
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | #define DRM_OUTPUT_POLL_PERIOD (10*HZ) | 96 | #define DRM_OUTPUT_POLL_PERIOD (10*HZ) |
97 | static void __drm_kms_helper_poll_enable(struct drm_device *dev) | 97 | /** |
98 | * drm_kms_helper_poll_enable_locked - re-enable output polling. | ||
99 | * @dev: drm_device | ||
100 | * | ||
101 | * This function re-enables the output polling work without | ||
102 | * locking the mode_config mutex. | ||
103 | * | ||
104 | * This is like drm_kms_helper_poll_enable() however it is to be | ||
105 | * called from a context where the mode_config mutex is locked | ||
106 | * already. | ||
107 | */ | ||
108 | void drm_kms_helper_poll_enable_locked(struct drm_device *dev) | ||
98 | { | 109 | { |
99 | bool poll = false; | 110 | bool poll = false; |
100 | struct drm_connector *connector; | 111 | struct drm_connector *connector; |
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev) | |||
113 | if (poll) | 124 | if (poll) |
114 | schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); | 125 | schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); |
115 | } | 126 | } |
127 | EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); | ||
128 | |||
116 | 129 | ||
117 | static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, | 130 | static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, |
118 | uint32_t maxX, uint32_t maxY, bool merge_type_bits) | 131 | uint32_t maxX, uint32_t maxY, bool merge_type_bits) |
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect | |||
174 | 187 | ||
175 | /* Re-enable polling in case the global poll config changed. */ | 188 | /* Re-enable polling in case the global poll config changed. */ |
176 | if (drm_kms_helper_poll != dev->mode_config.poll_running) | 189 | if (drm_kms_helper_poll != dev->mode_config.poll_running) |
177 | __drm_kms_helper_poll_enable(dev); | 190 | drm_kms_helper_poll_enable_locked(dev); |
178 | 191 | ||
179 | dev->mode_config.poll_running = drm_kms_helper_poll; | 192 | dev->mode_config.poll_running = drm_kms_helper_poll; |
180 | 193 | ||
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable); | |||
428 | void drm_kms_helper_poll_enable(struct drm_device *dev) | 441 | void drm_kms_helper_poll_enable(struct drm_device *dev) |
429 | { | 442 | { |
430 | mutex_lock(&dev->mode_config.mutex); | 443 | mutex_lock(&dev->mode_config.mutex); |
431 | __drm_kms_helper_poll_enable(dev); | 444 | drm_kms_helper_poll_enable_locked(dev); |
432 | mutex_unlock(&dev->mode_config.mutex); | 445 | mutex_unlock(&dev->mode_config.mutex); |
433 | } | 446 | } |
434 | EXPORT_SYMBOL(drm_kms_helper_poll_enable); | 447 | EXPORT_SYMBOL(drm_kms_helper_poll_enable); |
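The rename above exports the unlocked body as drm_kms_helper_poll_enable_locked(), while drm_kms_helper_poll_enable() keeps wrapping it with mode_config.mutex. A small sketch of the resulting calling convention; the caller functions are illustrative:

static void my_hotplug_handler(struct drm_device *dev)
{
	mutex_lock(&dev->mode_config.mutex);
	/* ... re-probe connectors ... */
	drm_kms_helper_poll_enable_locked(dev);	/* mutex already held */
	mutex_unlock(&dev->mode_config.mutex);
}

static void my_resume_handler(struct drm_device *dev)
{
	drm_kms_helper_poll_enable(dev);	/* takes mode_config.mutex itself */
}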
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 0f6cd33b531f..684bd4a13843 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c | |||
@@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device, | |||
235 | char *buf) | 235 | char *buf) |
236 | { | 236 | { |
237 | struct drm_connector *connector = to_drm_connector(device); | 237 | struct drm_connector *connector = to_drm_connector(device); |
238 | struct drm_device *dev = connector->dev; | 238 | int dpms; |
239 | uint64_t dpms_status; | ||
240 | int ret; | ||
241 | 239 | ||
242 | ret = drm_object_property_get_value(&connector->base, | 240 | dpms = READ_ONCE(connector->dpms); |
243 | dev->mode_config.dpms_property, | ||
244 | &dpms_status); | ||
245 | if (ret) | ||
246 | return 0; | ||
247 | 241 | ||
248 | return snprintf(buf, PAGE_SIZE, "%s\n", | 242 | return snprintf(buf, PAGE_SIZE, "%s\n", |
249 | drm_get_dpms_name((int)dpms_status)); | 243 | drm_get_dpms_name(dpms)); |
250 | } | 244 | } |
251 | 245 | ||
252 | static ssize_t enabled_show(struct device *device, | 246 | static ssize_t enabled_show(struct device *device, |
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index cbdb78ef3bac..e6cbaca821a4 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c | |||
@@ -37,7 +37,6 @@ | |||
37 | * DECON stands for Display and Enhancement controller. | 37 | * DECON stands for Display and Enhancement controller. |
38 | */ | 38 | */ |
39 | 39 | ||
40 | #define DECON_DEFAULT_FRAMERATE 60 | ||
41 | #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 | 40 | #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 |
42 | 41 | ||
43 | #define WINDOWS_NR 2 | 42 | #define WINDOWS_NR 2 |
@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx, | |||
165 | return (clkdiv < 0x100) ? clkdiv : 0xff; | 164 | return (clkdiv < 0x100) ? clkdiv : 0xff; |
166 | } | 165 | } |
167 | 166 | ||
168 | static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, | ||
169 | const struct drm_display_mode *mode, | ||
170 | struct drm_display_mode *adjusted_mode) | ||
171 | { | ||
172 | if (adjusted_mode->vrefresh == 0) | ||
173 | adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; | ||
174 | |||
175 | return true; | ||
176 | } | ||
177 | |||
178 | static void decon_commit(struct exynos_drm_crtc *crtc) | 167 | static void decon_commit(struct exynos_drm_crtc *crtc) |
179 | { | 168 | { |
180 | struct decon_context *ctx = crtc->ctx; | 169 | struct decon_context *ctx = crtc->ctx; |
@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc) | |||
637 | static const struct exynos_drm_crtc_ops decon_crtc_ops = { | 626 | static const struct exynos_drm_crtc_ops decon_crtc_ops = { |
638 | .enable = decon_enable, | 627 | .enable = decon_enable, |
639 | .disable = decon_disable, | 628 | .disable = decon_disable, |
640 | .mode_fixup = decon_mode_fixup, | ||
641 | .commit = decon_commit, | 629 | .commit = decon_commit, |
642 | .enable_vblank = decon_enable_vblank, | 630 | .enable_vblank = decon_enable_vblank, |
643 | .disable_vblank = decon_disable_vblank, | 631 | .disable_vblank = decon_disable_vblank, |
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index d66ade0efac8..124fb9a56f02 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c | |||
@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev) | |||
1383 | return 0; | 1383 | return 0; |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | #ifdef CONFIG_PM_SLEEP | ||
1387 | static int exynos_dp_suspend(struct device *dev) | ||
1388 | { | ||
1389 | struct exynos_dp_device *dp = dev_get_drvdata(dev); | ||
1390 | |||
1391 | exynos_dp_disable(&dp->encoder); | ||
1392 | return 0; | ||
1393 | } | ||
1394 | |||
1395 | static int exynos_dp_resume(struct device *dev) | ||
1396 | { | ||
1397 | struct exynos_dp_device *dp = dev_get_drvdata(dev); | ||
1398 | |||
1399 | exynos_dp_enable(&dp->encoder); | ||
1400 | return 0; | ||
1401 | } | ||
1402 | #endif | ||
1403 | |||
1404 | static const struct dev_pm_ops exynos_dp_pm_ops = { | ||
1405 | SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) | ||
1406 | }; | ||
1407 | |||
1408 | static const struct of_device_id exynos_dp_match[] = { | 1386 | static const struct of_device_id exynos_dp_match[] = { |
1409 | { .compatible = "samsung,exynos5-dp" }, | 1387 | { .compatible = "samsung,exynos5-dp" }, |
1410 | {}, | 1388 | {}, |
@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = { | |||
1417 | .driver = { | 1395 | .driver = { |
1418 | .name = "exynos-dp", | 1396 | .name = "exynos-dp", |
1419 | .owner = THIS_MODULE, | 1397 | .owner = THIS_MODULE, |
1420 | .pm = &exynos_dp_pm_ops, | ||
1421 | .of_match_table = exynos_dp_match, | 1398 | .of_match_table = exynos_dp_match, |
1422 | }, | 1399 | }, |
1423 | }; | 1400 | }; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index c68a6a2a9b57..7f55ba6771c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c | |||
@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) | |||
28 | 28 | ||
29 | return 0; | 29 | return 0; |
30 | } | 30 | } |
31 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); | ||
32 | 31 | ||
33 | int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) | 32 | int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) |
34 | { | 33 | { |
@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) | |||
39 | 38 | ||
40 | return 0; | 39 | return 0; |
41 | } | 40 | } |
42 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); | ||
43 | 41 | ||
44 | int exynos_drm_device_subdrv_probe(struct drm_device *dev) | 42 | int exynos_drm_device_subdrv_probe(struct drm_device *dev) |
45 | { | 43 | { |
@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev) | |||
69 | 67 | ||
70 | return 0; | 68 | return 0; |
71 | } | 69 | } |
72 | EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe); | ||
73 | 70 | ||
74 | int exynos_drm_device_subdrv_remove(struct drm_device *dev) | 71 | int exynos_drm_device_subdrv_remove(struct drm_device *dev) |
75 | { | 72 | { |
@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev) | |||
87 | 84 | ||
88 | return 0; | 85 | return 0; |
89 | } | 86 | } |
90 | EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove); | ||
91 | 87 | ||
92 | int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) | 88 | int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) |
93 | { | 89 | { |
@@ -111,7 +107,6 @@ err: | |||
111 | } | 107 | } |
112 | return ret; | 108 | return ret; |
113 | } | 109 | } |
114 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); | ||
115 | 110 | ||
116 | void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) | 111 | void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) |
117 | { | 112 | { |
@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) | |||
122 | subdrv->close(dev, subdrv->dev, file); | 117 | subdrv->close(dev, subdrv->dev, file); |
123 | } | 118 | } |
124 | } | 119 | } |
125 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 0872aa2f450f..ed28823d3b35 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc) | |||
41 | exynos_crtc->ops->disable(exynos_crtc); | 41 | exynos_crtc->ops->disable(exynos_crtc); |
42 | } | 42 | } |
43 | 43 | ||
44 | static bool | ||
45 | exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, | ||
46 | const struct drm_display_mode *mode, | ||
47 | struct drm_display_mode *adjusted_mode) | ||
48 | { | ||
49 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
50 | |||
51 | if (exynos_crtc->ops->mode_fixup) | ||
52 | return exynos_crtc->ops->mode_fixup(exynos_crtc, mode, | ||
53 | adjusted_mode); | ||
54 | |||
55 | return true; | ||
56 | } | ||
57 | |||
58 | static void | 44 | static void |
59 | exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) | 45 | exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) |
60 | { | 46 | { |
@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, | |||
99 | static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { | 85 | static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { |
100 | .enable = exynos_drm_crtc_enable, | 86 | .enable = exynos_drm_crtc_enable, |
101 | .disable = exynos_drm_crtc_disable, | 87 | .disable = exynos_drm_crtc_disable, |
102 | .mode_fixup = exynos_drm_crtc_mode_fixup, | ||
103 | .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, | 88 | .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, |
104 | .atomic_begin = exynos_crtc_atomic_begin, | 89 | .atomic_begin = exynos_crtc_atomic_begin, |
105 | .atomic_flush = exynos_crtc_atomic_flush, | 90 | .atomic_flush = exynos_crtc_atomic_flush, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 831d2e4cacf9..ae9e6b2d3758 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, | |||
304 | return 0; | 304 | return 0; |
305 | } | 305 | } |
306 | 306 | ||
307 | #ifdef CONFIG_PM_SLEEP | ||
307 | static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) | 308 | static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) |
308 | { | 309 | { |
309 | struct drm_connector *connector; | 310 | struct drm_connector *connector; |
@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev) | |||
340 | 341 | ||
341 | return 0; | 342 | return 0; |
342 | } | 343 | } |
344 | #endif | ||
343 | 345 | ||
344 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | 346 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) |
345 | { | 347 | { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index b7ba21dfb696..6c717ba672db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
@@ -82,7 +82,6 @@ struct exynos_drm_plane { | |||
82 | * | 82 | * |
83 | * @enable: enable the device | 83 | * @enable: enable the device |
84 | * @disable: disable the device | 84 | * @disable: disable the device |
85 | * @mode_fixup: fix mode data before applying it | ||
86 | * @commit: set current hw specific display mode to hw. | 85 | * @commit: set current hw specific display mode to hw. |
87 | * @enable_vblank: specific driver callback for enabling vblank interrupt. | 86 | * @enable_vblank: specific driver callback for enabling vblank interrupt. |
88 | * @disable_vblank: specific driver callback for disabling vblank interrupt. | 87 | * @disable_vblank: specific driver callback for disabling vblank interrupt. |
@@ -103,9 +102,6 @@ struct exynos_drm_crtc; | |||
103 | struct exynos_drm_crtc_ops { | 102 | struct exynos_drm_crtc_ops { |
104 | void (*enable)(struct exynos_drm_crtc *crtc); | 103 | void (*enable)(struct exynos_drm_crtc *crtc); |
105 | void (*disable)(struct exynos_drm_crtc *crtc); | 104 | void (*disable)(struct exynos_drm_crtc *crtc); |
106 | bool (*mode_fixup)(struct exynos_drm_crtc *crtc, | ||
107 | const struct drm_display_mode *mode, | ||
108 | struct drm_display_mode *adjusted_mode); | ||
109 | void (*commit)(struct exynos_drm_crtc *crtc); | 105 | void (*commit)(struct exynos_drm_crtc *crtc); |
110 | int (*enable_vblank)(struct exynos_drm_crtc *crtc); | 106 | int (*enable_vblank)(struct exynos_drm_crtc *crtc); |
111 | void (*disable_vblank)(struct exynos_drm_crtc *crtc); | 107 | void (*disable_vblank)(struct exynos_drm_crtc *crtc); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 2a652359af64..dd3a5e6d58c8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c | |||
@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = { | |||
1206 | .set_addr = fimc_dst_set_addr, | 1206 | .set_addr = fimc_dst_set_addr, |
1207 | }; | 1207 | }; |
1208 | 1208 | ||
1209 | static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) | ||
1210 | { | ||
1211 | DRM_DEBUG_KMS("enable[%d]\n", enable); | ||
1212 | |||
1213 | if (enable) { | ||
1214 | clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); | ||
1215 | clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); | ||
1216 | ctx->suspended = false; | ||
1217 | } else { | ||
1218 | clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); | ||
1219 | clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); | ||
1220 | ctx->suspended = true; | ||
1221 | } | ||
1222 | |||
1223 | return 0; | ||
1224 | } | ||
1225 | |||
1226 | static irqreturn_t fimc_irq_handler(int irq, void *dev_id) | 1209 | static irqreturn_t fimc_irq_handler(int irq, void *dev_id) |
1227 | { | 1210 | { |
1228 | struct fimc_context *ctx = dev_id; | 1211 | struct fimc_context *ctx = dev_id; |
@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev) | |||
1780 | return 0; | 1763 | return 0; |
1781 | } | 1764 | } |
1782 | 1765 | ||
1766 | #ifdef CONFIG_PM | ||
1767 | static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) | ||
1768 | { | ||
1769 | DRM_DEBUG_KMS("enable[%d]\n", enable); | ||
1770 | |||
1771 | if (enable) { | ||
1772 | clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); | ||
1773 | clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); | ||
1774 | ctx->suspended = false; | ||
1775 | } else { | ||
1776 | clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); | ||
1777 | clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); | ||
1778 | ctx->suspended = true; | ||
1779 | } | ||
1780 | |||
1781 | return 0; | ||
1782 | } | ||
1783 | |||
1783 | #ifdef CONFIG_PM_SLEEP | 1784 | #ifdef CONFIG_PM_SLEEP |
1784 | static int fimc_suspend(struct device *dev) | 1785 | static int fimc_suspend(struct device *dev) |
1785 | { | 1786 | { |
@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev) | |||
1806 | } | 1807 | } |
1807 | #endif | 1808 | #endif |
1808 | 1809 | ||
1809 | #ifdef CONFIG_PM | ||
1810 | static int fimc_runtime_suspend(struct device *dev) | 1810 | static int fimc_runtime_suspend(struct device *dev) |
1811 | { | 1811 | { |
1812 | struct fimc_context *ctx = get_fimc_context(dev); | 1812 | struct fimc_context *ctx = get_fimc_context(dev); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 750a9e6b9e8d..3d1aba67758b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -41,7 +41,6 @@ | |||
41 | * CPU Interface. | 41 | * CPU Interface. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #define FIMD_DEFAULT_FRAMERATE 60 | ||
45 | #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 | 44 | #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 |
46 | 45 | ||
47 | /* position control register for hardware window 0, 2 ~ 4.*/ | 46 | /* position control register for hardware window 0, 2 ~ 4.*/ |
@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, | |||
377 | return (clkdiv < 0x100) ? clkdiv : 0xff; | 376 | return (clkdiv < 0x100) ? clkdiv : 0xff; |
378 | } | 377 | } |
379 | 378 | ||
380 | static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc, | ||
381 | const struct drm_display_mode *mode, | ||
382 | struct drm_display_mode *adjusted_mode) | ||
383 | { | ||
384 | if (adjusted_mode->vrefresh == 0) | ||
385 | adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE; | ||
386 | |||
387 | return true; | ||
388 | } | ||
389 | |||
390 | static void fimd_commit(struct exynos_drm_crtc *crtc) | 379 | static void fimd_commit(struct exynos_drm_crtc *crtc) |
391 | { | 380 | { |
392 | struct fimd_context *ctx = crtc->ctx; | 381 | struct fimd_context *ctx = crtc->ctx; |
@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) | |||
882 | return; | 871 | return; |
883 | 872 | ||
884 | val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; | 873 | val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; |
885 | writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); | 874 | writel(val, ctx->regs + DP_MIE_CLKCON); |
886 | } | 875 | } |
887 | 876 | ||
888 | static const struct exynos_drm_crtc_ops fimd_crtc_ops = { | 877 | static const struct exynos_drm_crtc_ops fimd_crtc_ops = { |
889 | .enable = fimd_enable, | 878 | .enable = fimd_enable, |
890 | .disable = fimd_disable, | 879 | .disable = fimd_disable, |
891 | .mode_fixup = fimd_mode_fixup, | ||
892 | .commit = fimd_commit, | 880 | .commit = fimd_commit, |
893 | .enable_vblank = fimd_enable_vblank, | 881 | .enable_vblank = fimd_enable_vblank, |
894 | .disable_vblank = fimd_disable_vblank, | 882 | .disable_vblank = fimd_disable_vblank, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3734c34aed16..c17efdb238a6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, | |||
1059 | 1059 | ||
1060 | return 0; | 1060 | return 0; |
1061 | } | 1061 | } |
1062 | EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); | ||
1063 | 1062 | ||
1064 | int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, | 1063 | int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, |
1065 | struct drm_file *file) | 1064 | struct drm_file *file) |
@@ -1230,7 +1229,6 @@ err: | |||
1230 | g2d_put_cmdlist(g2d, node); | 1229 | g2d_put_cmdlist(g2d, node); |
1231 | return ret; | 1230 | return ret; |
1232 | } | 1231 | } |
1233 | EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); | ||
1234 | 1232 | ||
1235 | int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, | 1233 | int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, |
1236 | struct drm_file *file) | 1234 | struct drm_file *file) |
@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, | |||
1293 | out: | 1291 | out: |
1294 | return 0; | 1292 | return 0; |
1295 | } | 1293 | } |
1296 | EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); | ||
1297 | 1294 | ||
1298 | static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) | 1295 | static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) |
1299 | { | 1296 | { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index f12fbc36b120..407afedb6003 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) | |||
56 | nr_pages = obj->size >> PAGE_SHIFT; | 56 | nr_pages = obj->size >> PAGE_SHIFT; |
57 | 57 | ||
58 | if (!is_drm_iommu_supported(dev)) { | 58 | if (!is_drm_iommu_supported(dev)) { |
59 | dma_addr_t start_addr; | ||
60 | unsigned int i = 0; | ||
61 | |||
62 | obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); | 59 | obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); |
63 | if (!obj->pages) { | 60 | if (!obj->pages) { |
64 | DRM_ERROR("failed to allocate pages.\n"); | 61 | DRM_ERROR("failed to allocate pages.\n"); |
65 | return -ENOMEM; | 62 | return -ENOMEM; |
66 | } | 63 | } |
64 | } | ||
67 | 65 | ||
68 | obj->cookie = dma_alloc_attrs(dev->dev, | 66 | obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr, |
69 | obj->size, | 67 | GFP_KERNEL, &obj->dma_attrs); |
70 | &obj->dma_addr, GFP_KERNEL, | 68 | if (!obj->cookie) { |
71 | &obj->dma_attrs); | 69 | DRM_ERROR("failed to allocate buffer.\n"); |
72 | if (!obj->cookie) { | 70 | if (obj->pages) |
73 | DRM_ERROR("failed to allocate buffer.\n"); | ||
74 | drm_free_large(obj->pages); | 71 | drm_free_large(obj->pages); |
75 | return -ENOMEM; | 72 | return -ENOMEM; |
76 | } | 73 | } |
74 | |||
75 | if (obj->pages) { | ||
76 | dma_addr_t start_addr; | ||
77 | unsigned int i = 0; | ||
77 | 78 | ||
78 | start_addr = obj->dma_addr; | 79 | start_addr = obj->dma_addr; |
79 | while (i < nr_pages) { | 80 | while (i < nr_pages) { |
80 | obj->pages[i] = phys_to_page(start_addr); | 81 | obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, |
82 | start_addr)); | ||
81 | start_addr += PAGE_SIZE; | 83 | start_addr += PAGE_SIZE; |
82 | i++; | 84 | i++; |
83 | } | 85 | } |
84 | } else { | 86 | } else { |
85 | obj->pages = dma_alloc_attrs(dev->dev, obj->size, | 87 | obj->pages = obj->cookie; |
86 | &obj->dma_addr, GFP_KERNEL, | ||
87 | &obj->dma_attrs); | ||
88 | if (!obj->pages) { | ||
89 | DRM_ERROR("failed to allocate buffer.\n"); | ||
90 | return -ENOMEM; | ||
91 | } | ||
92 | } | 88 | } |
93 | 89 | ||
94 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", | 90 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", |
@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) | |||
110 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", | 106 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", |
111 | (unsigned long)obj->dma_addr, obj->size); | 107 | (unsigned long)obj->dma_addr, obj->size); |
112 | 108 | ||
113 | if (!is_drm_iommu_supported(dev)) { | 109 | dma_free_attrs(dev->dev, obj->size, obj->cookie, |
114 | dma_free_attrs(dev->dev, obj->size, obj->cookie, | 110 | (dma_addr_t)obj->dma_addr, &obj->dma_attrs); |
115 | (dma_addr_t)obj->dma_addr, &obj->dma_attrs); | ||
116 | drm_free_large(obj->pages); | ||
117 | } else | ||
118 | dma_free_attrs(dev->dev, obj->size, obj->pages, | ||
119 | (dma_addr_t)obj->dma_addr, &obj->dma_attrs); | ||
120 | 111 | ||
121 | obj->dma_addr = (dma_addr_t)NULL; | 112 | if (!is_drm_iommu_supported(dev)) |
113 | drm_free_large(obj->pages); | ||
122 | } | 114 | } |
123 | 115 | ||
124 | static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, | 116 | static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, |
@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) | |||
156 | * once dmabuf's refcount becomes 0. | 148 | * once dmabuf's refcount becomes 0. |
157 | */ | 149 | */ |
158 | if (obj->import_attach) | 150 | if (obj->import_attach) |
159 | goto out; | 151 | drm_prime_gem_destroy(obj, exynos_gem_obj->sgt); |
160 | 152 | else | |
161 | exynos_drm_free_buf(exynos_gem_obj); | 153 | exynos_drm_free_buf(exynos_gem_obj); |
162 | |||
163 | out: | ||
164 | drm_gem_free_mmap_offset(obj); | ||
165 | 154 | ||
166 | /* release file pointer to gem object. */ | 155 | /* release file pointer to gem object. */ |
167 | drm_gem_object_release(obj); | 156 | drm_gem_object_release(obj); |
168 | 157 | ||
169 | kfree(exynos_gem_obj); | 158 | kfree(exynos_gem_obj); |
170 | exynos_gem_obj = NULL; | ||
171 | } | 159 | } |
172 | 160 | ||
173 | unsigned long exynos_drm_gem_get_size(struct drm_device *dev, | 161 | unsigned long exynos_drm_gem_get_size(struct drm_device *dev, |
@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, | |||
190 | return exynos_gem_obj->size; | 178 | return exynos_gem_obj->size; |
191 | } | 179 | } |
192 | 180 | ||
193 | 181 | static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | |
194 | struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | ||
195 | unsigned long size) | 182 | unsigned long size) |
196 | { | 183 | { |
197 | struct exynos_drm_gem_obj *exynos_gem_obj; | 184 | struct exynos_drm_gem_obj *exynos_gem_obj; |
@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | |||
212 | return ERR_PTR(ret); | 199 | return ERR_PTR(ret); |
213 | } | 200 | } |
214 | 201 | ||
202 | ret = drm_gem_create_mmap_offset(obj); | ||
203 | if (ret < 0) { | ||
204 | drm_gem_object_release(obj); | ||
205 | kfree(exynos_gem_obj); | ||
206 | return ERR_PTR(ret); | ||
207 | } | ||
208 | |||
215 | DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); | 209 | DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); |
216 | 210 | ||
217 | return exynos_gem_obj; | 211 | return exynos_gem_obj; |
@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, | |||
313 | drm_gem_object_unreference_unlocked(obj); | 307 | drm_gem_object_unreference_unlocked(obj); |
314 | } | 308 | } |
315 | 309 | ||
316 | int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, | 310 | static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, |
317 | struct vm_area_struct *vma) | 311 | struct vm_area_struct *vma) |
318 | { | 312 | { |
319 | struct drm_device *drm_dev = exynos_gem_obj->base.dev; | 313 | struct drm_device *drm_dev = exynos_gem_obj->base.dev; |
@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, | |||
342 | 336 | ||
343 | int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, | 337 | int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, |
344 | struct drm_file *file_priv) | 338 | struct drm_file *file_priv) |
345 | { struct exynos_drm_gem_obj *exynos_gem_obj; | 339 | { |
340 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
346 | struct drm_exynos_gem_info *args = data; | 341 | struct drm_exynos_gem_info *args = data; |
347 | struct drm_gem_object *obj; | 342 | struct drm_gem_object *obj; |
348 | 343 | ||
@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, | |||
402 | struct drm_mode_create_dumb *args) | 397 | struct drm_mode_create_dumb *args) |
403 | { | 398 | { |
404 | struct exynos_drm_gem_obj *exynos_gem_obj; | 399 | struct exynos_drm_gem_obj *exynos_gem_obj; |
400 | unsigned int flags; | ||
405 | int ret; | 401 | int ret; |
406 | 402 | ||
407 | /* | 403 | /* |
@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, | |||
413 | args->pitch = args->width * ((args->bpp + 7) / 8); | 409 | args->pitch = args->width * ((args->bpp + 7) / 8); |
414 | args->size = args->pitch * args->height; | 410 | args->size = args->pitch * args->height; |
415 | 411 | ||
416 | if (is_drm_iommu_supported(dev)) { | 412 | if (is_drm_iommu_supported(dev)) |
417 | exynos_gem_obj = exynos_drm_gem_create(dev, | 413 | flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; |
418 | EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, | 414 | else |
419 | args->size); | 415 | flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; |
420 | } else { | ||
421 | exynos_gem_obj = exynos_drm_gem_create(dev, | ||
422 | EXYNOS_BO_CONTIG | EXYNOS_BO_WC, | ||
423 | args->size); | ||
424 | } | ||
425 | 416 | ||
417 | exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size); | ||
426 | if (IS_ERR(exynos_gem_obj)) { | 418 | if (IS_ERR(exynos_gem_obj)) { |
427 | dev_warn(dev->dev, "FB allocation failed.\n"); | 419 | dev_warn(dev->dev, "FB allocation failed.\n"); |
428 | return PTR_ERR(exynos_gem_obj); | 420 | return PTR_ERR(exynos_gem_obj); |
@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, | |||
460 | goto unlock; | 452 | goto unlock; |
461 | } | 453 | } |
462 | 454 | ||
463 | ret = drm_gem_create_mmap_offset(obj); | ||
464 | if (ret) | ||
465 | goto out; | ||
466 | |||
467 | *offset = drm_vma_node_offset_addr(&obj->vma_node); | 455 | *offset = drm_vma_node_offset_addr(&obj->vma_node); |
468 | DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); | 456 | DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); |
469 | 457 | ||
470 | out: | ||
471 | drm_gem_object_unreference(obj); | 458 | drm_gem_object_unreference(obj); |
472 | unlock: | 459 | unlock: |
473 | mutex_unlock(&dev->struct_mutex); | 460 | mutex_unlock(&dev->struct_mutex); |
@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
543 | 530 | ||
544 | err_close_vm: | 531 | err_close_vm: |
545 | drm_gem_vm_close(vma); | 532 | drm_gem_vm_close(vma); |
546 | drm_gem_free_mmap_offset(obj); | ||
547 | 533 | ||
548 | return ret; | 534 | return ret; |
549 | } | 535 | } |
@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, | |||
588 | if (ret < 0) | 574 | if (ret < 0) |
589 | goto err_free_large; | 575 | goto err_free_large; |
590 | 576 | ||
577 | exynos_gem_obj->sgt = sgt; | ||
578 | |||
591 | if (sgt->nents == 1) { | 579 | if (sgt->nents == 1) { |
592 | /* always physically continuous memory if sgt->nents is 1. */ | 580 | /* always physically continuous memory if sgt->nents is 1. */ |
593 | exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; | 581 | exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; |
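The allocation rework earlier in this file collapses the separate IOMMU and non-IOMMU dma_alloc_attrs() calls into one. A condensed view of the resulting flow, paraphrased from the new-side lines with error messages trimmed:

if (!is_drm_iommu_supported(dev))
	obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));

obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
			      GFP_KERNEL, &obj->dma_attrs);

if (obj->pages) {
	/* no IOMMU: the buffer is contiguous, so derive the page array
	 * from the DMA address */
	dma_addr_t addr = obj->dma_addr;
	unsigned int i;

	for (i = 0; i < nr_pages; i++, addr += PAGE_SIZE)
		obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, addr));
} else {
	/* IOMMU: dma_alloc_attrs() already returned the page array */
	obj->pages = obj->cookie;
}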
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index cd62f8410d1e..b62d1007c0e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
@@ -39,6 +39,7 @@ | |||
39 | * - this address could be physical address without IOMMU and | 39 | * - this address could be physical address without IOMMU and |
40 | * device address with IOMMU. | 40 | * device address with IOMMU. |
41 | * @pages: Array of backing pages. | 41 | * @pages: Array of backing pages. |
42 | * @sgt: Imported sg_table. | ||
42 | * | 43 | * |
43 | * P.S. this object would be transferred to user as kms_bo.handle so | 44 | * P.S. this object would be transferred to user as kms_bo.handle so |
44 | * user can access the buffer through kms_bo.handle. | 45 | * user can access the buffer through kms_bo.handle. |
@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj { | |||
52 | dma_addr_t dma_addr; | 53 | dma_addr_t dma_addr; |
53 | struct dma_attrs dma_attrs; | 54 | struct dma_attrs dma_attrs; |
54 | struct page **pages; | 55 | struct page **pages; |
56 | struct sg_table *sgt; | ||
55 | }; | 57 | }; |
56 | 58 | ||
57 | struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); | 59 | struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); |
@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); | |||
59 | /* destroy a buffer with gem object */ | 61 | /* destroy a buffer with gem object */ |
60 | void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); | 62 | void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); |
61 | 63 | ||
62 | /* create a private gem object and initialize it. */ | ||
63 | struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | ||
64 | unsigned long size); | ||
65 | |||
66 | /* create a new buffer with gem object */ | 64 | /* create a new buffer with gem object */ |
67 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, | 65 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, |
68 | unsigned int flags, | 66 | unsigned int flags, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 425e70625388..2f5c118f4c8e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c | |||
@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev) | |||
786 | return 0; | 786 | return 0; |
787 | } | 787 | } |
788 | 788 | ||
789 | #ifdef CONFIG_PM | ||
789 | static int rotator_clk_crtl(struct rot_context *rot, bool enable) | 790 | static int rotator_clk_crtl(struct rot_context *rot, bool enable) |
790 | { | 791 | { |
791 | if (enable) { | 792 | if (enable) { |
@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev) | |||
822 | } | 823 | } |
823 | #endif | 824 | #endif |
824 | 825 | ||
825 | #ifdef CONFIG_PM | ||
826 | static int rotator_runtime_suspend(struct device *dev) | 826 | static int rotator_runtime_suspend(struct device *dev) |
827 | { | 827 | { |
828 | struct rot_context *rot = dev_get_drvdata(dev); | 828 | struct rot_context *rot = dev_get_drvdata(dev); |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 82be6b86a168..d1e300dcd544 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | |||
@@ -58,7 +58,8 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane, | |||
58 | struct drm_plane_state *old_state) | 58 | struct drm_plane_state *old_state) |
59 | { | 59 | { |
60 | struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; | 60 | struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; |
61 | unsigned int index, value, ret; | 61 | unsigned int value; |
62 | int index, ret; | ||
62 | 63 | ||
63 | index = fsl_dcu_drm_plane_index(plane); | 64 | index = fsl_dcu_drm_plane_index(plane); |
64 | if (index < 0) | 65 | if (index < 0) |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5a244ab9395b..39d73dbc1c47 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -640,6 +640,32 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) | |||
640 | position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; | 640 | position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; |
641 | 641 | ||
642 | /* | 642 | /* |
643 | * On HSW, the DSL reg (0x70000) appears to return 0 if we | ||
644 | * read it just before the start of vblank. So try it again | ||
645 | * so we don't accidentally end up spanning a vblank frame | ||
646 | * increment, causing the pipe_update_end() code to squeak at us. | ||
647 | * | ||
648 | * The nature of this problem means we can't simply check the ISR | ||
649 | * bit and return the vblank start value; nor can we use the scanline | ||
650 | * debug register in the transcoder as it appears to have the same | ||
651 | * problem. We may need to extend this to include other platforms, | ||
652 | * but so far testing only shows the problem on HSW. | ||
653 | */ | ||
654 | if (IS_HASWELL(dev) && !position) { | ||
655 | int i, temp; | ||
656 | |||
657 | for (i = 0; i < 100; i++) { | ||
658 | udelay(1); | ||
659 | temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & | ||
660 | DSL_LINEMASK_GEN3; | ||
661 | if (temp != position) { | ||
662 | position = temp; | ||
663 | break; | ||
664 | } | ||
665 | } | ||
666 | } | ||
667 | |||
668 | /* | ||
643 | * See update_scanline_offset() for the details on the | 669 | * See update_scanline_offset() for the details on the |
644 | * scanline_offset adjustment. | 670 | * scanline_offset adjustment. |
645 | */ | 671 | */ |
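The HSW workaround above is a bounded re-read: if the first sample of the scanline register is 0 just before vblank, poll it up to 100 more times and accept the first value that differs. A self-contained sketch of that retry pattern follows; read_scanline() is only a stand-in for the PIPEDSL register read and the simulated values are made up.

    #include <stdint.h>
    #include <stdio.h>

    /* Simulated register: the first reads return the bogus 0 seen near
     * vblank start, later reads return the real scanline. */
    static uint32_t fake_dsl_reads[] = { 0, 0, 417 };
    static unsigned int fake_idx;

    static uint32_t read_scanline(void)
    {
            uint32_t v = fake_dsl_reads[fake_idx];

            if (fake_idx + 1 < sizeof(fake_dsl_reads) / sizeof(fake_dsl_reads[0]))
                    fake_idx++;
            return v;
    }

    static uint32_t sample_scanline(void)
    {
            uint32_t position = read_scanline();

            /* A zero just before vblank start may be bogus: retry a bounded
             * number of times and accept the first differing value. */
            if (position == 0) {
                    for (int i = 0; i < 100; i++) {
                            uint32_t temp = read_scanline();

                            if (temp != position) {
                                    position = temp;
                                    break;
                            }
                    }
            }
            return position;
    }

    int main(void)
    {
            printf("scanline = %u\n", (unsigned int)sample_scanline());
            return 0;
    }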
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 89c1a8ce1f98..2a5c76faf9f8 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -430,7 +430,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) | |||
430 | 430 | ||
431 | /** | 431 | /** |
432 | * intel_audio_codec_disable - Disable the audio codec for HD audio | 432 | * intel_audio_codec_disable - Disable the audio codec for HD audio |
433 | * @encoder: encoder on which to disable audio | 433 | * @intel_encoder: encoder on which to disable audio |
434 | * | 434 | * |
435 | * The disable sequences must be performed before disabling the transcoder or | 435 | * The disable sequences must be performed before disabling the transcoder or |
436 | * port. | 436 | * port. |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b3e437b3bb54..c19e669ffe50 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -42,7 +42,7 @@ find_section(const void *_bdb, int section_id) | |||
42 | const struct bdb_header *bdb = _bdb; | 42 | const struct bdb_header *bdb = _bdb; |
43 | const u8 *base = _bdb; | 43 | const u8 *base = _bdb; |
44 | int index = 0; | 44 | int index = 0; |
45 | u16 total, current_size; | 45 | u32 total, current_size; |
46 | u8 current_id; | 46 | u8 current_id; |
47 | 47 | ||
48 | /* skip to first section */ | 48 | /* skip to first section */ |
@@ -57,6 +57,10 @@ find_section(const void *_bdb, int section_id) | |||
57 | current_size = *((const u16 *)(base + index)); | 57 | current_size = *((const u16 *)(base + index)); |
58 | index += 2; | 58 | index += 2; |
59 | 59 | ||
60 | /* The MIPI Sequence Block v3+ has a separate size field. */ | ||
61 | if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3) | ||
62 | current_size = *((const u32 *)(base + index + 1)); | ||
63 | |||
60 | if (index + current_size > total) | 64 | if (index + current_size > total) |
61 | return NULL; | 65 | return NULL; |
62 | 66 | ||
@@ -799,6 +803,12 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) | |||
799 | return; | 803 | return; |
800 | } | 804 | } |
801 | 805 | ||
806 | /* Fail gracefully for forward incompatible sequence block. */ | ||
807 | if (sequence->version >= 3) { | ||
808 | DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n"); | ||
809 | return; | ||
810 | } | ||
811 | |||
802 | DRM_DEBUG_DRIVER("Found MIPI sequence block\n"); | 812 | DRM_DEBUG_DRIVER("Found MIPI sequence block\n"); |
803 | 813 | ||
804 | block_size = get_blocksize(sequence); | 814 | block_size = get_blocksize(sequence); |
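The intel_bios.c change above widens the section size to 32 bits and, when the MIPI sequence block's version byte (the first payload byte) is 3 or more, takes the block size from a separate 32-bit field that follows it rather than from the common 16-bit header. A rough sketch of that header walk; the byte layout is inferred from the hunk and the block id here is only a placeholder.

    #include <stdint.h>

    #define BDB_MIPI_SEQUENCE 53   /* assumed block id for this sketch */

    static uint32_t rd16(const uint8_t *p) { return p[0] | (uint32_t)p[1] << 8; }
    static uint32_t rd32(const uint8_t *p)
    {
            return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 |
                   (uint32_t)p[3] << 24;
    }

    /* Each section: 1-byte id, 2-byte little-endian size, then payload.
     * For the MIPI sequence block v3+ the real payload size sits in a u32
     * right after the 1-byte version that begins the payload. */
    static const uint8_t *find_section(const uint8_t *base, uint32_t total, int id)
    {
            uint32_t index = 0;

            while (index + 3 <= total) {
                    uint8_t current_id = base[index];
                    uint32_t current_size = rd16(base + index + 1);

                    index += 3;
                    if (current_id == BDB_MIPI_SEQUENCE &&
                        index + 5 <= total && base[index] >= 3)
                            current_size = rd32(base + index + 1);
                    if (index + current_size > total)
                            return 0;
                    if (current_id == id)
                            return base + index;
                    index += current_size;
            }
            return 0;
    }

    int main(void)
    {
            /* id 7 (size 2, payload "ab") followed by id 9 (size 1). */
            const uint8_t bdb[] = { 7, 2, 0, 'a', 'b', 9, 1, 0, 0x42 };

            return find_section(bdb, sizeof(bdb), 9) ? 0 : 1;
    }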
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8cc9264f7809..cf418be7d30a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -15087,9 +15087,12 @@ static void readout_plane_state(struct intel_crtc *crtc, | |||
15087 | 15087 | ||
15088 | plane_state = to_intel_plane_state(p->base.state); | 15088 | plane_state = to_intel_plane_state(p->base.state); |
15089 | 15089 | ||
15090 | if (p->base.type == DRM_PLANE_TYPE_PRIMARY) | 15090 | if (p->base.type == DRM_PLANE_TYPE_PRIMARY) { |
15091 | plane_state->visible = primary_get_hw_state(crtc); | 15091 | plane_state->visible = primary_get_hw_state(crtc); |
15092 | else { | 15092 | if (plane_state->visible) |
15093 | crtc->base.state->plane_mask |= | ||
15094 | 1 << drm_plane_index(&p->base); | ||
15095 | } else { | ||
15093 | if (active) | 15096 | if (active) |
15094 | p->disable_plane(&p->base, &crtc->base); | 15097 | p->disable_plane(&p->base, &crtc->base); |
15095 | 15098 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 3e4be5a3becd..6ade06888432 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo | |||
462 | drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); | 462 | drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); |
463 | 463 | ||
464 | drm_mode_connector_set_path_property(connector, pathprop); | 464 | drm_mode_connector_set_path_property(connector, pathprop); |
465 | return connector; | ||
466 | } | ||
467 | |||
468 | static void intel_dp_register_mst_connector(struct drm_connector *connector) | ||
469 | { | ||
470 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
471 | struct drm_device *dev = connector->dev; | ||
465 | drm_modeset_lock_all(dev); | 472 | drm_modeset_lock_all(dev); |
466 | intel_connector_add_to_fbdev(intel_connector); | 473 | intel_connector_add_to_fbdev(intel_connector); |
467 | drm_modeset_unlock_all(dev); | 474 | drm_modeset_unlock_all(dev); |
468 | drm_connector_register(&intel_connector->base); | 475 | drm_connector_register(&intel_connector->base); |
469 | return connector; | ||
470 | } | 476 | } |
471 | 477 | ||
472 | static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | 478 | static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, |
@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) | |||
512 | 518 | ||
513 | static struct drm_dp_mst_topology_cbs mst_cbs = { | 519 | static struct drm_dp_mst_topology_cbs mst_cbs = { |
514 | .add_connector = intel_dp_add_mst_connector, | 520 | .add_connector = intel_dp_add_mst_connector, |
521 | .register_connector = intel_dp_register_mst_connector, | ||
515 | .destroy_connector = intel_dp_destroy_mst_connector, | 522 | .destroy_connector = intel_dp_destroy_mst_connector, |
516 | .hotplug = intel_dp_mst_hotplug, | 523 | .hotplug = intel_dp_mst_hotplug, |
517 | }; | 524 | }; |
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 53c0173a39fe..b17785719598 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c | |||
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) | |||
180 | 180 | ||
181 | /* Enable polling and queue hotplug re-enabling. */ | 181 | /* Enable polling and queue hotplug re-enabling. */ |
182 | if (hpd_disabled) { | 182 | if (hpd_disabled) { |
183 | drm_kms_helper_poll_enable(dev); | 183 | drm_kms_helper_poll_enable_locked(dev); |
184 | mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, | 184 | mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, |
185 | msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); | 185 | msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); |
186 | } | 186 | } |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 72e0edd7bbde..7412caedcf7f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) | |||
484 | status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); | 484 | status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); |
485 | 485 | ||
486 | read_pointer = ring->next_context_status_buffer; | 486 | read_pointer = ring->next_context_status_buffer; |
487 | write_pointer = status_pointer & 0x07; | 487 | write_pointer = status_pointer & GEN8_CSB_PTR_MASK; |
488 | if (read_pointer > write_pointer) | 488 | if (read_pointer > write_pointer) |
489 | write_pointer += 6; | 489 | write_pointer += GEN8_CSB_ENTRIES; |
490 | 490 | ||
491 | spin_lock(&ring->execlist_lock); | 491 | spin_lock(&ring->execlist_lock); |
492 | 492 | ||
493 | while (read_pointer < write_pointer) { | 493 | while (read_pointer < write_pointer) { |
494 | read_pointer++; | 494 | read_pointer++; |
495 | status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + | 495 | status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + |
496 | (read_pointer % 6) * 8); | 496 | (read_pointer % GEN8_CSB_ENTRIES) * 8); |
497 | status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + | 497 | status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + |
498 | (read_pointer % 6) * 8 + 4); | 498 | (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4); |
499 | 499 | ||
500 | if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) | 500 | if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) |
501 | continue; | 501 | continue; |
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) | |||
521 | spin_unlock(&ring->execlist_lock); | 521 | spin_unlock(&ring->execlist_lock); |
522 | 522 | ||
523 | WARN(submit_contexts > 2, "More than two context complete events?\n"); | 523 | WARN(submit_contexts > 2, "More than two context complete events?\n"); |
524 | ring->next_context_status_buffer = write_pointer % 6; | 524 | ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; |
525 | 525 | ||
526 | I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), | 526 | I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), |
527 | _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8)); | 527 | _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, |
528 | ((u32)ring->next_context_status_buffer & | ||
529 | GEN8_CSB_PTR_MASK) << 8)); | ||
528 | } | 530 | } |
529 | 531 | ||
530 | static int execlists_context_queue(struct drm_i915_gem_request *request) | 532 | static int execlists_context_queue(struct drm_i915_gem_request *request) |
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) | |||
1422 | { | 1424 | { |
1423 | struct drm_device *dev = ring->dev; | 1425 | struct drm_device *dev = ring->dev; |
1424 | struct drm_i915_private *dev_priv = dev->dev_private; | 1426 | struct drm_i915_private *dev_priv = dev->dev_private; |
1427 | u8 next_context_status_buffer_hw; | ||
1425 | 1428 | ||
1426 | I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); | 1429 | I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); |
1427 | I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); | 1430 | I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); |
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) | |||
1436 | _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | | 1439 | _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | |
1437 | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); | 1440 | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); |
1438 | POSTING_READ(RING_MODE_GEN7(ring)); | 1441 | POSTING_READ(RING_MODE_GEN7(ring)); |
1439 | ring->next_context_status_buffer = 0; | 1442 | |
1443 | /* | ||
1444 | * Instead of resetting the Context Status Buffer (CSB) read pointer to | ||
1445 | * zero, we need to read the write pointer from hardware and use its | ||
1446 | * value because "this register is power context save restored". | ||
1447 | * Effectively, these states have been observed: | ||
1448 | * | ||
1449 | * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | | ||
1450 | * BDW | CSB regs not reset | CSB regs reset | | ||
1451 | * CHT | CSB regs not reset | CSB regs not reset | | ||
1452 | */ | ||
1453 | next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) | ||
1454 | & GEN8_CSB_PTR_MASK); | ||
1455 | |||
1456 | /* | ||
1457 | * When the CSB registers are reset (also after power-up / gpu reset), | ||
1458 | * CSB write pointer is set to all 1's, which is not valid; use '5' in | ||
1459 | * this special case so that the first element read is CSB[0]. | ||
1460 | */ | ||
1461 | if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) | ||
1462 | next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); | ||
1463 | |||
1464 | ring->next_context_status_buffer = next_context_status_buffer_hw; | ||
1440 | DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); | 1465 | DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); |
1441 | 1466 | ||
1442 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); | 1467 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); |
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 64f89f9982a2..3c63bb32ad81 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h | |||
@@ -25,6 +25,8 @@ | |||
25 | #define _INTEL_LRC_H_ | 25 | #define _INTEL_LRC_H_ |
26 | 26 | ||
27 | #define GEN8_LR_CONTEXT_ALIGN 4096 | 27 | #define GEN8_LR_CONTEXT_ALIGN 4096 |
28 | #define GEN8_CSB_ENTRIES 6 | ||
29 | #define GEN8_CSB_PTR_MASK 0x07 | ||
28 | 30 | ||
29 | /* Execlists regs */ | 31 | /* Execlists regs */ |
30 | #define RING_ELSP(ring) ((ring)->mmio_base+0x230) | 32 | #define RING_ELSP(ring) ((ring)->mmio_base+0x230) |
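The execlists changes above treat the context status buffer as a six-entry ring: the hardware write pointer is masked with GEN8_CSB_PTR_MASK, the read pointer wraps modulo GEN8_CSB_ENTRIES, and on re-init the read pointer is restored from hardware, with the all-ones reset value mapped to GEN8_CSB_ENTRIES - 1 so the first entry consumed is CSB[0]. A user-space model of just that pointer arithmetic, reusing the same constant values, might look like this.

    #include <stdio.h>

    #define CSB_ENTRIES  6
    #define CSB_PTR_MASK 0x07

    /* Restore the software read pointer from the hardware value on re-init.
     * After a reset the hardware reports all 1's, which is not a valid slot;
     * start at ENTRIES - 1 so the first slot consumed is CSB[0]. */
    static unsigned int restore_read_pointer(unsigned int hw_status_pointer)
    {
            unsigned int ptr = hw_status_pointer & CSB_PTR_MASK;

            if (ptr == CSB_PTR_MASK)
                    ptr = CSB_ENTRIES - 1;
            return ptr;
    }

    /* Consume entries between read and write pointers, wrapping at ENTRIES. */
    static unsigned int drain_csb(unsigned int read_pointer, unsigned int write_pointer)
    {
            if (read_pointer > write_pointer)
                    write_pointer += CSB_ENTRIES;

            while (read_pointer < write_pointer) {
                    read_pointer++;
                    printf("consume CSB[%u]\n", read_pointer % CSB_ENTRIES);
            }
            return write_pointer % CSB_ENTRIES;
    }

    int main(void)
    {
            unsigned int rd = restore_read_pointer(0xff); /* reset case -> 5 */

            rd = drain_csb(rd, 2);                        /* consumes CSB[0..2] */
            printf("next read pointer = %u\n", rd);
            return 0;
    }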
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index af7fdb3bd663..7401cf90b0db 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, | |||
246 | } | 246 | } |
247 | 247 | ||
248 | if (power_well->data == SKL_DISP_PW_1) { | 248 | if (power_well->data == SKL_DISP_PW_1) { |
249 | intel_prepare_ddi(dev); | 249 | if (!dev_priv->power_domains.initializing) |
250 | intel_prepare_ddi(dev); | ||
250 | gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); | 251 | gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); |
251 | } | 252 | } |
252 | } | 253 | } |
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 87de15ea1f93..b35b5b2db4ec 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c | |||
@@ -186,17 +186,19 @@ static int mgag200fb_create(struct drm_fb_helper *helper, | |||
186 | 186 | ||
187 | sysram = vmalloc(size); | 187 | sysram = vmalloc(size); |
188 | if (!sysram) | 188 | if (!sysram) |
189 | return -ENOMEM; | 189 | goto err_sysram; |
190 | 190 | ||
191 | info = drm_fb_helper_alloc_fbi(helper); | 191 | info = drm_fb_helper_alloc_fbi(helper); |
192 | if (IS_ERR(info)) | 192 | if (IS_ERR(info)) { |
193 | return PTR_ERR(info); | 193 | ret = PTR_ERR(info); |
194 | goto err_alloc_fbi; | ||
195 | } | ||
194 | 196 | ||
195 | info->par = mfbdev; | 197 | info->par = mfbdev; |
196 | 198 | ||
197 | ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj); | 199 | ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj); |
198 | if (ret) | 200 | if (ret) |
199 | return ret; | 201 | goto err_framebuffer_init; |
200 | 202 | ||
201 | mfbdev->sysram = sysram; | 203 | mfbdev->sysram = sysram; |
202 | mfbdev->size = size; | 204 | mfbdev->size = size; |
@@ -225,7 +227,17 @@ static int mgag200fb_create(struct drm_fb_helper *helper, | |||
225 | 227 | ||
226 | DRM_DEBUG_KMS("allocated %dx%d\n", | 228 | DRM_DEBUG_KMS("allocated %dx%d\n", |
227 | fb->width, fb->height); | 229 | fb->width, fb->height); |
230 | |||
228 | return 0; | 231 | return 0; |
232 | |||
233 | err_framebuffer_init: | ||
234 | drm_fb_helper_release_fbi(helper); | ||
235 | err_alloc_fbi: | ||
236 | vfree(sysram); | ||
237 | err_sysram: | ||
238 | drm_gem_object_unreference_unlocked(gobj); | ||
239 | |||
240 | return ret; | ||
229 | } | 241 | } |
230 | 242 | ||
231 | static int mga_fbdev_destroy(struct drm_device *dev, | 243 | static int mga_fbdev_destroy(struct drm_device *dev, |
@@ -276,23 +288,26 @@ int mgag200_fbdev_init(struct mga_device *mdev) | |||
276 | ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, | 288 | ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, |
277 | mdev->num_crtc, MGAG200FB_CONN_LIMIT); | 289 | mdev->num_crtc, MGAG200FB_CONN_LIMIT); |
278 | if (ret) | 290 | if (ret) |
279 | return ret; | 291 | goto err_fb_helper; |
280 | 292 | ||
281 | ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper); | 293 | ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper); |
282 | if (ret) | 294 | if (ret) |
283 | goto fini; | 295 | goto err_fb_setup; |
284 | 296 | ||
285 | /* disable all the possible outputs/crtcs before entering KMS mode */ | 297 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
286 | drm_helper_disable_unused_functions(mdev->dev); | 298 | drm_helper_disable_unused_functions(mdev->dev); |
287 | 299 | ||
288 | ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel); | 300 | ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel); |
289 | if (ret) | 301 | if (ret) |
290 | goto fini; | 302 | goto err_fb_setup; |
291 | 303 | ||
292 | return 0; | 304 | return 0; |
293 | 305 | ||
294 | fini: | 306 | err_fb_setup: |
295 | drm_fb_helper_fini(&mfbdev->helper); | 307 | drm_fb_helper_fini(&mfbdev->helper); |
308 | err_fb_helper: | ||
309 | mdev->mfbdev = NULL; | ||
310 | |||
296 | return ret; | 311 | return ret; |
297 | } | 312 | } |
298 | 313 | ||
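The mgag200 error-path rework above replaces early returns with a single goto ladder so that each failure releases exactly what has already been set up, in reverse order. A stripped-down sketch of that shape, with plain allocations standing in for the DRM objects, is below.

    #include <stdlib.h>

    struct fbdev { void *sysram; void *info; };

    /* Each failure label undoes only the steps that already succeeded, in
     * reverse order -- the shape used by the mgag200fb_create() rework. */
    static int fbdev_create(struct fbdev *f, size_t size)
    {
            int ret = -1;

            f->sysram = malloc(size);          /* step 1 */
            if (!f->sysram)
                    goto err_sysram;

            f->info = malloc(64);              /* step 2 */
            if (!f->info)
                    goto err_alloc_info;

            return 0;                          /* both resources now held by *f */

    err_alloc_info:
            free(f->sysram);                   /* undo step 1 only */
            f->sysram = NULL;
    err_sysram:
            return ret;
    }

    int main(void)
    {
            struct fbdev f = { 0 };
            int ret = fbdev_create(&f, 4096);

            free(f.info);
            free(f.sysram);
            return ret ? 1 : 0;
    }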
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index de06388069e7..b1a0f5656175 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c | |||
@@ -220,7 +220,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) | |||
220 | } | 220 | } |
221 | r = mgag200_mm_init(mdev); | 221 | r = mgag200_mm_init(mdev); |
222 | if (r) | 222 | if (r) |
223 | goto out; | 223 | goto err_mm; |
224 | 224 | ||
225 | drm_mode_config_init(dev); | 225 | drm_mode_config_init(dev); |
226 | dev->mode_config.funcs = (void *)&mga_mode_funcs; | 226 | dev->mode_config.funcs = (void *)&mga_mode_funcs; |
@@ -233,7 +233,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) | |||
233 | r = mgag200_modeset_init(mdev); | 233 | r = mgag200_modeset_init(mdev); |
234 | if (r) { | 234 | if (r) { |
235 | dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r); | 235 | dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r); |
236 | goto out; | 236 | goto err_modeset; |
237 | } | 237 | } |
238 | 238 | ||
239 | /* Make small buffers to store a hardware cursor (double buffered icon updates) */ | 239 | /* Make small buffers to store a hardware cursor (double buffered icon updates) */ |
@@ -241,20 +241,24 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) | |||
241 | &mdev->cursor.pixels_1); | 241 | &mdev->cursor.pixels_1); |
242 | mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0, | 242 | mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0, |
243 | &mdev->cursor.pixels_2); | 243 | &mdev->cursor.pixels_2); |
244 | if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) | 244 | if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) { |
245 | goto cursor_nospace; | 245 | mdev->cursor.pixels_1 = NULL; |
246 | mdev->cursor.pixels_current = mdev->cursor.pixels_1; | 246 | mdev->cursor.pixels_2 = NULL; |
247 | mdev->cursor.pixels_prev = mdev->cursor.pixels_2; | 247 | dev_warn(&dev->pdev->dev, |
248 | goto cursor_done; | 248 | "Could not allocate space for cursors. Not doing hardware cursors.\n"); |
249 | cursor_nospace: | 249 | } else { |
250 | mdev->cursor.pixels_1 = NULL; | 250 | mdev->cursor.pixels_current = mdev->cursor.pixels_1; |
251 | mdev->cursor.pixels_2 = NULL; | 251 | mdev->cursor.pixels_prev = mdev->cursor.pixels_2; |
252 | dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n"); | 252 | } |
253 | cursor_done: | 253 | |
254 | 254 | return 0; | |
255 | out: | 255 | |
256 | if (r) | 256 | err_modeset: |
257 | mgag200_driver_unload(dev); | 257 | drm_mode_config_cleanup(dev); |
258 | mgag200_mm_fini(mdev); | ||
259 | err_mm: | ||
260 | dev->dev_private = NULL; | ||
261 | |||
258 | return r; | 262 | return r; |
259 | } | 263 | } |
260 | 264 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index b1f73bee1368..b0d4b53b97f4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | |||
@@ -178,7 +178,6 @@ static int mdp5_hw_irqdomain_map(struct irq_domain *d, | |||
178 | 178 | ||
179 | irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq); | 179 | irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq); |
180 | irq_set_chip_data(irq, mdp5_kms); | 180 | irq_set_chip_data(irq, mdp5_kms); |
181 | set_irq_flags(irq, IRQF_VALID); | ||
182 | 181 | ||
183 | return 0; | 182 | return 0; |
184 | } | 183 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index cc6c228e11c8..e905c00acf1a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -469,9 +469,13 @@ nouveau_display_create(struct drm_device *dev) | |||
469 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { | 469 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
470 | dev->mode_config.max_width = 4096; | 470 | dev->mode_config.max_width = 4096; |
471 | dev->mode_config.max_height = 4096; | 471 | dev->mode_config.max_height = 4096; |
472 | } else { | 472 | } else |
473 | if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) { | ||
473 | dev->mode_config.max_width = 8192; | 474 | dev->mode_config.max_width = 8192; |
474 | dev->mode_config.max_height = 8192; | 475 | dev->mode_config.max_height = 8192; |
476 | } else { | ||
477 | dev->mode_config.max_width = 16384; | ||
478 | dev->mode_config.max_height = 16384; | ||
475 | } | 479 | } |
476 | 480 | ||
477 | dev->mode_config.preferred_depth = 24; | 481 | dev->mode_config.preferred_depth = 24; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 2791701685dc..59f27e774acb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info) | |||
178 | return 0; | 178 | return 0; |
179 | } | 179 | } |
180 | 180 | ||
181 | static int | ||
182 | nouveau_fbcon_open(struct fb_info *info, int user) | ||
183 | { | ||
184 | struct nouveau_fbdev *fbcon = info->par; | ||
185 | struct nouveau_drm *drm = nouveau_drm(fbcon->dev); | ||
186 | int ret = pm_runtime_get_sync(drm->dev->dev); | ||
187 | if (ret < 0 && ret != -EACCES) | ||
188 | return ret; | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static int | ||
193 | nouveau_fbcon_release(struct fb_info *info, int user) | ||
194 | { | ||
195 | struct nouveau_fbdev *fbcon = info->par; | ||
196 | struct nouveau_drm *drm = nouveau_drm(fbcon->dev); | ||
197 | pm_runtime_put(drm->dev->dev); | ||
198 | return 0; | ||
199 | } | ||
200 | |||
181 | static struct fb_ops nouveau_fbcon_ops = { | 201 | static struct fb_ops nouveau_fbcon_ops = { |
182 | .owner = THIS_MODULE, | 202 | .owner = THIS_MODULE, |
203 | .fb_open = nouveau_fbcon_open, | ||
204 | .fb_release = nouveau_fbcon_release, | ||
183 | .fb_check_var = drm_fb_helper_check_var, | 205 | .fb_check_var = drm_fb_helper_check_var, |
184 | .fb_set_par = drm_fb_helper_set_par, | 206 | .fb_set_par = drm_fb_helper_set_par, |
185 | .fb_fillrect = nouveau_fbcon_fillrect, | 207 | .fb_fillrect = nouveau_fbcon_fillrect, |
@@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = { | |||
195 | 217 | ||
196 | static struct fb_ops nouveau_fbcon_sw_ops = { | 218 | static struct fb_ops nouveau_fbcon_sw_ops = { |
197 | .owner = THIS_MODULE, | 219 | .owner = THIS_MODULE, |
220 | .fb_open = nouveau_fbcon_open, | ||
221 | .fb_release = nouveau_fbcon_release, | ||
198 | .fb_check_var = drm_fb_helper_check_var, | 222 | .fb_check_var = drm_fb_helper_check_var, |
199 | .fb_set_par = drm_fb_helper_set_par, | 223 | .fb_set_par = drm_fb_helper_set_par, |
200 | .fb_fillrect = drm_fb_helper_cfb_fillrect, | 224 | .fb_fillrect = drm_fb_helper_cfb_fillrect, |
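The new fb_open/fb_release hooks above pair a pm_runtime_get_sync() on open with a pm_runtime_put() on release, so the GPU stays resumed for as long as the fbdev node is held open. A user-space model of that balanced get/put, with a plain reference count standing in for the runtime-PM core, is sketched here.

    #include <stdio.h>

    static int pm_refcount;

    /* Model of pm_runtime_get_sync(): take a reference, power up on 0 -> 1. */
    static int pm_get(void)
    {
            if (pm_refcount++ == 0)
                    printf("device powered up\n");
            return 0;
    }

    /* Model of pm_runtime_put(): drop a reference, power down on 1 -> 0. */
    static void pm_put(void)
    {
            if (--pm_refcount == 0)
                    printf("device powered down\n");
    }

    static int fb_open(void)
    {
            int ret = pm_get();

            /* The driver only fails the open on real errors; a runtime-PM
             * "access denied" style result is tolerated there. */
            if (ret < 0)
                    return ret;
            return 0;
    }

    static void fb_release(void)
    {
            pm_put();
    }

    int main(void)
    {
            fb_open();      /* first opener powers the device up */
            fb_open();      /* nested opens just bump the count */
            fb_release();
            fb_release();   /* last release powers it back down */
            return 0;
    }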
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c index 65af31441e9c..a7d69ce7abc1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c | |||
@@ -267,6 +267,12 @@ init_i2c(struct nvbios_init *init, int index) | |||
267 | index = NVKM_I2C_BUS_PRI; | 267 | index = NVKM_I2C_BUS_PRI; |
268 | if (init->outp && init->outp->i2c_upper_default) | 268 | if (init->outp && init->outp->i2c_upper_default) |
269 | index = NVKM_I2C_BUS_SEC; | 269 | index = NVKM_I2C_BUS_SEC; |
270 | } else | ||
271 | if (index == 0x80) { | ||
272 | index = NVKM_I2C_BUS_PRI; | ||
273 | } else | ||
274 | if (index == 0x81) { | ||
275 | index = NVKM_I2C_BUS_SEC; | ||
270 | } | 276 | } |
271 | 277 | ||
272 | bus = nvkm_i2c_bus_find(i2c, index); | 278 | bus = nvkm_i2c_bus_find(i2c, index); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h index e0ec2a6b7b79..212800ecdce9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h | |||
@@ -8,7 +8,10 @@ struct nvbios_source { | |||
8 | void *(*init)(struct nvkm_bios *, const char *); | 8 | void *(*init)(struct nvkm_bios *, const char *); |
9 | void (*fini)(void *); | 9 | void (*fini)(void *); |
10 | u32 (*read)(void *, u32 offset, u32 length, struct nvkm_bios *); | 10 | u32 (*read)(void *, u32 offset, u32 length, struct nvkm_bios *); |
11 | u32 (*size)(void *); | ||
11 | bool rw; | 12 | bool rw; |
13 | bool ignore_checksum; | ||
14 | bool no_pcir; | ||
12 | }; | 15 | }; |
13 | 16 | ||
14 | int nvbios_extend(struct nvkm_bios *, u32 length); | 17 | int nvbios_extend(struct nvkm_bios *, u32 length); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 792f017525f6..b2557e87afdd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | |||
@@ -45,7 +45,7 @@ shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto) | |||
45 | u32 read = mthd->func->read(data, start, limit - start, bios); | 45 | u32 read = mthd->func->read(data, start, limit - start, bios); |
46 | bios->size = start + read; | 46 | bios->size = start + read; |
47 | } | 47 | } |
48 | return bios->size >= limit; | 48 | return bios->size >= upto; |
49 | } | 49 | } |
50 | 50 | ||
51 | static int | 51 | static int |
@@ -55,14 +55,22 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) | |||
55 | struct nvbios_image image; | 55 | struct nvbios_image image; |
56 | int score = 1; | 56 | int score = 1; |
57 | 57 | ||
58 | if (!shadow_fetch(bios, mthd, offset + 0x1000)) { | 58 | if (mthd->func->no_pcir) { |
59 | nvkm_debug(subdev, "%08x: header fetch failed\n", offset); | 59 | image.base = 0; |
60 | return 0; | 60 | image.type = 0; |
61 | } | 61 | image.size = mthd->func->size(mthd->data); |
62 | image.last = 1; | ||
63 | } else { | ||
64 | if (!shadow_fetch(bios, mthd, offset + 0x1000)) { | ||
65 | nvkm_debug(subdev, "%08x: header fetch failed\n", | ||
66 | offset); | ||
67 | return 0; | ||
68 | } | ||
62 | 69 | ||
63 | if (!nvbios_image(bios, idx, &image)) { | 70 | if (!nvbios_image(bios, idx, &image)) { |
64 | nvkm_debug(subdev, "image %d invalid\n", idx); | 71 | nvkm_debug(subdev, "image %d invalid\n", idx); |
65 | return 0; | 72 | return 0; |
73 | } | ||
66 | } | 74 | } |
67 | nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", | 75 | nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", |
68 | image.base, image.type, image.size); | 76 | image.base, image.type, image.size); |
@@ -74,7 +82,8 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) | |||
74 | 82 | ||
75 | switch (image.type) { | 83 | switch (image.type) { |
76 | case 0x00: | 84 | case 0x00: |
77 | if (nvbios_checksum(&bios->data[image.base], image.size)) { | 85 | if (!mthd->func->ignore_checksum && |
86 | nvbios_checksum(&bios->data[image.base], image.size)) { | ||
78 | nvkm_debug(subdev, "%08x: checksum failed\n", | 87 | nvkm_debug(subdev, "%08x: checksum failed\n", |
79 | image.base); | 88 | image.base); |
80 | if (mthd->func->rw) | 89 | if (mthd->func->rw) |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c index bd60d7dd09f5..4bf486b57101 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c | |||
@@ -21,6 +21,7 @@ | |||
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | #include "priv.h" | 23 | #include "priv.h" |
24 | |||
24 | #include <core/pci.h> | 25 | #include <core/pci.h> |
25 | 26 | ||
26 | #if defined(__powerpc__) | 27 | #if defined(__powerpc__) |
@@ -33,17 +34,26 @@ static u32 | |||
33 | of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios) | 34 | of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios) |
34 | { | 35 | { |
35 | struct priv *priv = data; | 36 | struct priv *priv = data; |
36 | if (offset + length <= priv->size) { | 37 | if (offset < priv->size) { |
38 | length = min_t(u32, length, priv->size - offset); | ||
37 | memcpy_fromio(bios->data + offset, priv->data + offset, length); | 39 | memcpy_fromio(bios->data + offset, priv->data + offset, length); |
38 | return length; | 40 | return length; |
39 | } | 41 | } |
40 | return 0; | 42 | return 0; |
41 | } | 43 | } |
42 | 44 | ||
45 | static u32 | ||
46 | of_size(void *data) | ||
47 | { | ||
48 | struct priv *priv = data; | ||
49 | return priv->size; | ||
50 | } | ||
51 | |||
43 | static void * | 52 | static void * |
44 | of_init(struct nvkm_bios *bios, const char *name) | 53 | of_init(struct nvkm_bios *bios, const char *name) |
45 | { | 54 | { |
46 | struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev; | 55 | struct nvkm_device *device = bios->subdev.device; |
56 | struct pci_dev *pdev = device->func->pci(device)->pdev; | ||
47 | struct device_node *dn; | 57 | struct device_node *dn; |
48 | struct priv *priv; | 58 | struct priv *priv; |
49 | if (!(dn = pci_device_to_OF_node(pdev))) | 59 | if (!(dn = pci_device_to_OF_node(pdev))) |
@@ -62,7 +72,10 @@ nvbios_of = { | |||
62 | .init = of_init, | 72 | .init = of_init, |
63 | .fini = (void(*)(void *))kfree, | 73 | .fini = (void(*)(void *))kfree, |
64 | .read = of_read, | 74 | .read = of_read, |
75 | .size = of_size, | ||
65 | .rw = false, | 76 | .rw = false, |
77 | .ignore_checksum = true, | ||
78 | .no_pcir = true, | ||
66 | }; | 79 | }; |
67 | #else | 80 | #else |
68 | const struct nvbios_source | 81 | const struct nvbios_source |
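The of_read() change above accepts reads that start inside the shadowed image but run past its end, clamping the copy length instead of failing the whole request. A minimal user-space sketch of that clamp (all names here are placeholders):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Copy up to 'length' bytes at 'offset' from a 'size'-byte image.  A read
     * starting past the end returns 0; one that merely runs past the end is
     * clamped rather than rejected. */
    static uint32_t clamped_read(uint8_t *dst, const uint8_t *img, uint32_t size,
                                 uint32_t offset, uint32_t length)
    {
            if (offset >= size)
                    return 0;
            if (length > size - offset)
                    length = size - offset;
            memcpy(dst, img + offset, length);
            return length;
    }

    int main(void)
    {
            const uint8_t img[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
            uint8_t buf[16];

            /* Requests 16 bytes at offset 6; only the 2 available are copied. */
            printf("%u bytes copied\n",
                   (unsigned int)clamped_read(buf, img, sizeof(img), 6, 16));
            return 0;
    }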
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c index 814cb51cc873..385a90f91ed6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c | |||
@@ -35,6 +35,8 @@ static const struct nvkm_device_agp_quirk | |||
35 | nvkm_device_agp_quirks[] = { | 35 | nvkm_device_agp_quirks[] = { |
36 | /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */ | 36 | /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */ |
37 | { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 }, | 37 | { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 }, |
38 | /* SiS 761 does not support AGP cards, use PCI mode */ | ||
39 | { PCI_VENDOR_ID_SI, 0x0761, PCI_ANY_ID, PCI_ANY_ID, 0 }, | ||
38 | {}, | 40 | {}, |
39 | }; | 41 | }; |
40 | 42 | ||
@@ -137,8 +139,10 @@ nvkm_agp_ctor(struct nvkm_pci *pci) | |||
137 | while (quirk->hostbridge_vendor) { | 139 | while (quirk->hostbridge_vendor) { |
138 | if (info.device->vendor == quirk->hostbridge_vendor && | 140 | if (info.device->vendor == quirk->hostbridge_vendor && |
139 | info.device->device == quirk->hostbridge_device && | 141 | info.device->device == quirk->hostbridge_device && |
140 | pci->pdev->vendor == quirk->chip_vendor && | 142 | (quirk->chip_vendor == (u16)PCI_ANY_ID || |
141 | pci->pdev->device == quirk->chip_device) { | 143 | pci->pdev->vendor == quirk->chip_vendor) && |
144 | (quirk->chip_device == (u16)PCI_ANY_ID || | ||
145 | pci->pdev->device == quirk->chip_device)) { | ||
142 | nvkm_info(subdev, "forcing default agp mode to %dX, " | 146 | nvkm_info(subdev, "forcing default agp mode to %dX, " |
143 | "use NvAGP=<mode> to override\n", | 147 | "use NvAGP=<mode> to override\n", |
144 | quirk->mode); | 148 | quirk->mode); |
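The AGP quirk change above lets a table entry match any chip by putting PCI_ANY_ID in the vendor or device field: a wildcard field compares equal to everything, while the host-bridge fields still match exactly. A small stand-alone model of that lookup (the IDs and names are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define ANY_ID 0xffffU   /* stands in for (u16)PCI_ANY_ID */

    struct quirk {
            uint16_t host_vendor, host_device;
            uint16_t chip_vendor, chip_device;
            int mode;
    };

    static const struct quirk quirks[] = {
            { 0x1106, 0x0691, 0x10de, 0x0311, 2 }, /* exact-match entry */
            { 0x1039, 0x0761, ANY_ID, ANY_ID, 0 }, /* any GPU behind this bridge */
            { 0 },
    };

    static int match(uint16_t id, uint16_t want)
    {
            return want == ANY_ID || id == want;
    }

    static const struct quirk *find_quirk(uint16_t hv, uint16_t hd,
                                          uint16_t cv, uint16_t cd)
    {
            const struct quirk *q;

            for (q = quirks; q->host_vendor; q++)
                    if (hv == q->host_vendor && hd == q->host_device &&
                        match(cv, q->chip_vendor) && match(cd, q->chip_device))
                            return q;
            return 0;
    }

    int main(void)
    {
            const struct quirk *q = find_quirk(0x1039, 0x0761, 0x10de, 0x0040);

            printf("forced AGP mode: %d\n", q ? q->mode : -1);
            return 0;
    }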
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 7c6225c84ba6..183aea1abebc 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
@@ -244,6 +244,10 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, | |||
244 | ret = qxl_bo_reserve(bo, false); | 244 | ret = qxl_bo_reserve(bo, false); |
245 | if (ret) | 245 | if (ret) |
246 | return ret; | 246 | return ret; |
247 | ret = qxl_bo_pin(bo, bo->type, NULL); | ||
248 | qxl_bo_unreserve(bo); | ||
249 | if (ret) | ||
250 | return ret; | ||
247 | 251 | ||
248 | qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, | 252 | qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, |
249 | &norect, one_clip_rect, inc); | 253 | &norect, one_clip_rect, inc); |
@@ -257,7 +261,11 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, | |||
257 | } | 261 | } |
258 | drm_vblank_put(dev, qcrtc->index); | 262 | drm_vblank_put(dev, qcrtc->index); |
259 | 263 | ||
260 | qxl_bo_unreserve(bo); | 264 | ret = qxl_bo_reserve(bo, false); |
265 | if (!ret) { | ||
266 | qxl_bo_unpin(bo); | ||
267 | qxl_bo_unreserve(bo); | ||
268 | } | ||
261 | 269 | ||
262 | return 0; | 270 | return 0; |
263 | } | 271 | } |
@@ -618,7 +626,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, | |||
618 | adjusted_mode->hdisplay, | 626 | adjusted_mode->hdisplay, |
619 | adjusted_mode->vdisplay); | 627 | adjusted_mode->vdisplay); |
620 | 628 | ||
621 | if (qcrtc->index == 0) | 629 | if (bo->is_primary == false) |
622 | recreate_primary = true; | 630 | recreate_primary = true; |
623 | 631 | ||
624 | if (bo->surf.stride * bo->surf.height > qdev->vram_size) { | 632 | if (bo->surf.stride * bo->surf.height > qdev->vram_size) { |
@@ -886,13 +894,15 @@ static enum drm_connector_status qxl_conn_detect( | |||
886 | drm_connector_to_qxl_output(connector); | 894 | drm_connector_to_qxl_output(connector); |
887 | struct drm_device *ddev = connector->dev; | 895 | struct drm_device *ddev = connector->dev; |
888 | struct qxl_device *qdev = ddev->dev_private; | 896 | struct qxl_device *qdev = ddev->dev_private; |
889 | int connected; | 897 | bool connected = false; |
890 | 898 | ||
891 | /* The first monitor is always connected */ | 899 | /* The first monitor is always connected */ |
892 | connected = (output->index == 0) || | 900 | if (!qdev->client_monitors_config) { |
893 | (qdev->client_monitors_config && | 901 | if (output->index == 0) |
894 | qdev->client_monitors_config->count > output->index && | 902 | connected = true; |
895 | qxl_head_enabled(&qdev->client_monitors_config->heads[output->index])); | 903 | } else |
904 | connected = qdev->client_monitors_config->count > output->index && | ||
905 | qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]); | ||
896 | 906 | ||
897 | DRM_DEBUG("#%d connected: %d\n", output->index, connected); | 907 | DRM_DEBUG("#%d connected: %d\n", output->index, connected); |
898 | if (!connected) | 908 | if (!connected) |
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 41c422fee31a..c4a552637c93 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c | |||
@@ -144,14 +144,17 @@ static void qxl_dirty_update(struct qxl_fbdev *qfbdev, | |||
144 | 144 | ||
145 | spin_lock_irqsave(&qfbdev->dirty.lock, flags); | 145 | spin_lock_irqsave(&qfbdev->dirty.lock, flags); |
146 | 146 | ||
147 | if (qfbdev->dirty.y1 < y) | 147 | if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) && |
148 | y = qfbdev->dirty.y1; | 148 | (qfbdev->dirty.x2 - qfbdev->dirty.x1)) { |
149 | if (qfbdev->dirty.y2 > y2) | 149 | if (qfbdev->dirty.y1 < y) |
150 | y2 = qfbdev->dirty.y2; | 150 | y = qfbdev->dirty.y1; |
151 | if (qfbdev->dirty.x1 < x) | 151 | if (qfbdev->dirty.y2 > y2) |
152 | x = qfbdev->dirty.x1; | 152 | y2 = qfbdev->dirty.y2; |
153 | if (qfbdev->dirty.x2 > x2) | 153 | if (qfbdev->dirty.x1 < x) |
154 | x2 = qfbdev->dirty.x2; | 154 | x = qfbdev->dirty.x1; |
155 | if (qfbdev->dirty.x2 > x2) | ||
156 | x2 = qfbdev->dirty.x2; | ||
157 | } | ||
155 | 158 | ||
156 | qfbdev->dirty.x1 = x; | 159 | qfbdev->dirty.x1 = x; |
157 | qfbdev->dirty.x2 = x2; | 160 | qfbdev->dirty.x2 = x2; |
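The qxl_dirty_update() change above only folds the previously recorded dirty rectangle into the new one when that rectangle actually has area; otherwise an empty 0,0,0,0 rectangle would drag the union's corner to the origin and mark far too much of the framebuffer dirty. A short sketch of that guarded union:

    #include <stdio.h>

    struct rect { int x1, y1, x2, y2; };

    /* Grow 'cur' to also cover 'pending', but only if 'pending' actually has
     * area; an empty pending rect must not distort the union. */
    static void merge_dirty(struct rect *cur, const struct rect *pending)
    {
            if ((pending->y2 - pending->y1) && (pending->x2 - pending->x1)) {
                    if (pending->y1 < cur->y1) cur->y1 = pending->y1;
                    if (pending->y2 > cur->y2) cur->y2 = pending->y2;
                    if (pending->x1 < cur->x1) cur->x1 = pending->x1;
                    if (pending->x2 > cur->x2) cur->x2 = pending->x2;
            }
    }

    int main(void)
    {
            struct rect cur = { 100, 100, 200, 200 };
            struct rect empty = { 0, 0, 0, 0 };

            merge_dirty(&cur, &empty);      /* no-op: the union stays intact */
            printf("%d,%d - %d,%d\n", cur.x1, cur.y1, cur.x2, cur.y2);
            return 0;
    }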
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index b66ec331c17c..4efa8e261baf 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c | |||
@@ -307,7 +307,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | |||
307 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); | 307 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); |
308 | if (idr_ret < 0) | 308 | if (idr_ret < 0) |
309 | return idr_ret; | 309 | return idr_ret; |
310 | bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); | 310 | bo = to_qxl_bo(entry->tv.bo); |
311 | 311 | ||
312 | (*release)->release_offset = create_rel->release_offset + 64; | 312 | (*release)->release_offset = create_rel->release_offset + 64; |
313 | 313 | ||
@@ -316,8 +316,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | |||
316 | info = qxl_release_map(qdev, *release); | 316 | info = qxl_release_map(qdev, *release); |
317 | info->id = idr_ret; | 317 | info->id = idr_ret; |
318 | qxl_release_unmap(qdev, *release, info); | 318 | qxl_release_unmap(qdev, *release, info); |
319 | |||
320 | qxl_bo_unref(&bo); | ||
321 | return 0; | 319 | return 0; |
322 | } | 320 | } |
323 | 321 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index c3872598b85a..65adb9c72377 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) | |||
1624 | } else | 1624 | } else |
1625 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1625 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
1626 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 1626 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
1627 | args.ucAction = ATOM_LCD_BLON; | 1627 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
1628 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1628 | |
1629 | atombios_set_backlight_level(radeon_encoder, dig->backlight_level); | ||
1629 | } | 1630 | } |
1630 | break; | 1631 | break; |
1631 | case DRM_MODE_DPMS_STANDBY: | 1632 | case DRM_MODE_DPMS_STANDBY: |
@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1706 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); | 1707 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); |
1707 | } | 1708 | } |
1708 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | 1709 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
1709 | atombios_dig_transmitter_setup(encoder, | 1710 | atombios_set_backlight_level(radeon_encoder, dig->backlight_level); |
1710 | ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); | ||
1711 | if (ext_encoder) | 1711 | if (ext_encoder) |
1712 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); | 1712 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); |
1713 | break; | 1713 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index d8319dae8358..f3f562f6d848 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1573,10 +1573,12 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) | |||
1573 | 1573 | ||
1574 | drm_kms_helper_poll_disable(dev); | 1574 | drm_kms_helper_poll_disable(dev); |
1575 | 1575 | ||
1576 | drm_modeset_lock_all(dev); | ||
1576 | /* turn off display hw */ | 1577 | /* turn off display hw */ |
1577 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1578 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1578 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | 1579 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); |
1579 | } | 1580 | } |
1581 | drm_modeset_unlock_all(dev); | ||
1580 | 1582 | ||
1581 | /* unpin the front buffers and cursors */ | 1583 | /* unpin the front buffers and cursors */ |
1582 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 1584 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
@@ -1734,9 +1736,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | |||
1734 | if (fbcon) { | 1736 | if (fbcon) { |
1735 | drm_helper_resume_force_mode(dev); | 1737 | drm_helper_resume_force_mode(dev); |
1736 | /* turn on display hw */ | 1738 | /* turn on display hw */ |
1739 | drm_modeset_lock_all(dev); | ||
1737 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1740 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1738 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | 1741 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); |
1739 | } | 1742 | } |
1743 | drm_modeset_unlock_all(dev); | ||
1740 | } | 1744 | } |
1741 | 1745 | ||
1742 | drm_kms_helper_poll_enable(dev); | 1746 | drm_kms_helper_poll_enable(dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d2e9e9efc159..6743174acdbc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
1633 | radeon_fbdev_init(rdev); | 1633 | radeon_fbdev_init(rdev); |
1634 | drm_kms_helper_poll_init(rdev->ddev); | 1634 | drm_kms_helper_poll_init(rdev->ddev); |
1635 | 1635 | ||
1636 | if (rdev->pm.dpm_enabled) { | 1636 | /* do pm late init */ |
1637 | /* do dpm late init */ | 1637 | ret = radeon_pm_late_init(rdev); |
1638 | ret = radeon_pm_late_init(rdev); | ||
1639 | if (ret) { | ||
1640 | rdev->pm.dpm_enabled = false; | ||
1641 | DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); | ||
1642 | } | ||
1643 | /* set the dpm state for PX since there won't be | ||
1644 | * a modeset to call this. | ||
1645 | */ | ||
1646 | radeon_pm_compute_clocks(rdev); | ||
1647 | } | ||
1648 | 1638 | ||
1649 | return 0; | 1639 | return 0; |
1650 | } | 1640 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 5e09c061847f..744f5c49c664 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c | |||
@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol | |||
265 | { | 265 | { |
266 | struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); | 266 | struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); |
267 | struct drm_device *dev = master->base.dev; | 267 | struct drm_device *dev = master->base.dev; |
268 | struct radeon_device *rdev = dev->dev_private; | ||
269 | struct radeon_connector *radeon_connector; | 268 | struct radeon_connector *radeon_connector; |
270 | struct drm_connector *connector; | 269 | struct drm_connector *connector; |
271 | 270 | ||
@@ -284,14 +283,22 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol | |||
284 | radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master); | 283 | radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master); |
285 | 284 | ||
286 | drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); | 285 | drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); |
286 | drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); | ||
287 | drm_mode_connector_set_path_property(connector, pathprop); | 287 | drm_mode_connector_set_path_property(connector, pathprop); |
288 | 288 | ||
289 | return connector; | ||
290 | } | ||
291 | |||
292 | static void radeon_dp_register_mst_connector(struct drm_connector *connector) | ||
293 | { | ||
294 | struct drm_device *dev = connector->dev; | ||
295 | struct radeon_device *rdev = dev->dev_private; | ||
296 | |||
289 | drm_modeset_lock_all(dev); | 297 | drm_modeset_lock_all(dev); |
290 | radeon_fb_add_connector(rdev, connector); | 298 | radeon_fb_add_connector(rdev, connector); |
291 | drm_modeset_unlock_all(dev); | 299 | drm_modeset_unlock_all(dev); |
292 | 300 | ||
293 | drm_connector_register(connector); | 301 | drm_connector_register(connector); |
294 | return connector; | ||
295 | } | 302 | } |
296 | 303 | ||
297 | static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | 304 | static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, |
@@ -324,6 +331,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) | |||
324 | 331 | ||
325 | struct drm_dp_mst_topology_cbs mst_cbs = { | 332 | struct drm_dp_mst_topology_cbs mst_cbs = { |
326 | .add_connector = radeon_dp_add_mst_connector, | 333 | .add_connector = radeon_dp_add_mst_connector, |
334 | .register_connector = radeon_dp_register_mst_connector, | ||
327 | .destroy_connector = radeon_dp_destroy_mst_connector, | 335 | .destroy_connector = radeon_dp_destroy_mst_connector, |
328 | .hotplug = radeon_dp_mst_hotplug, | 336 | .hotplug = radeon_dp_mst_hotplug, |
329 | }; | 337 | }; |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 7214858ffcea..26da2f4d7b4f 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -48,40 +48,10 @@ struct radeon_fbdev { | |||
48 | struct radeon_device *rdev; | 48 | struct radeon_device *rdev; |
49 | }; | 49 | }; |
50 | 50 | ||
51 | /** | ||
52 | * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev. | ||
53 | * | ||
54 | * @info: fbdev info | ||
55 | * | ||
56 | * This function hides the cursor on all CRTCs used by fbdev. | ||
57 | */ | ||
58 | static int radeon_fb_helper_set_par(struct fb_info *info) | ||
59 | { | ||
60 | int ret; | ||
61 | |||
62 | ret = drm_fb_helper_set_par(info); | ||
63 | |||
64 | /* XXX: with universal plane support fbdev will automatically disable | ||
65 | * all non-primary planes (including the cursor) | ||
66 | */ | ||
67 | if (ret == 0) { | ||
68 | struct drm_fb_helper *fb_helper = info->par; | ||
69 | int i; | ||
70 | |||
71 | for (i = 0; i < fb_helper->crtc_count; i++) { | ||
72 | struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc; | ||
73 | |||
74 | radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); | ||
75 | } | ||
76 | } | ||
77 | |||
78 | return ret; | ||
79 | } | ||
80 | |||
81 | static struct fb_ops radeonfb_ops = { | 51 | static struct fb_ops radeonfb_ops = { |
82 | .owner = THIS_MODULE, | 52 | .owner = THIS_MODULE, |
83 | .fb_check_var = drm_fb_helper_check_var, | 53 | .fb_check_var = drm_fb_helper_check_var, |
84 | .fb_set_par = radeon_fb_helper_set_par, | 54 | .fb_set_par = drm_fb_helper_set_par, |
85 | .fb_fillrect = drm_fb_helper_cfb_fillrect, | 55 | .fb_fillrect = drm_fb_helper_cfb_fillrect, |
86 | .fb_copyarea = drm_fb_helper_cfb_copyarea, | 56 | .fb_copyarea = drm_fb_helper_cfb_copyarea, |
87 | .fb_imageblit = drm_fb_helper_cfb_imageblit, | 57 | .fb_imageblit = drm_fb_helper_cfb_imageblit, |
@@ -427,3 +397,19 @@ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector | |||
427 | { | 397 | { |
428 | drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector); | 398 | drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector); |
429 | } | 399 | } |
400 | |||
401 | void radeon_fbdev_restore_mode(struct radeon_device *rdev) | ||
402 | { | ||
403 | struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev; | ||
404 | struct drm_fb_helper *fb_helper; | ||
405 | int ret; | ||
406 | |||
407 | if (!rfbdev) | ||
408 | return; | ||
409 | |||
410 | fb_helper = &rfbdev->helper; | ||
411 | |||
412 | ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); | ||
413 | if (ret) | ||
414 | DRM_DEBUG("failed to restore crtc mode\n"); | ||
415 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4a119c255ba9..0e932bf932c1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -598,7 +598,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
598 | * Outdated mess for old drm with Xorg being in charge (void function now). | 598 | * Outdated mess for old drm with Xorg being in charge (void function now). |
599 | */ | 599 | */ |
600 | /** | 600 | /** |
601 | * radeon_driver_firstopen_kms - drm callback for last close | 601 | * radeon_driver_lastclose_kms - drm callback for last close |
602 | * | 602 | * |
603 | * @dev: drm dev pointer | 603 | * @dev: drm dev pointer |
604 | * | 604 | * |
@@ -606,6 +606,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
606 | */ | 606 | */ |
607 | void radeon_driver_lastclose_kms(struct drm_device *dev) | 607 | void radeon_driver_lastclose_kms(struct drm_device *dev) |
608 | { | 608 | { |
609 | struct radeon_device *rdev = dev->dev_private; | ||
610 | |||
611 | radeon_fbdev_restore_mode(rdev); | ||
609 | vga_switcheroo_process_delayed_switch(); | 612 | vga_switcheroo_process_delayed_switch(); |
610 | } | 613 | } |
611 | 614 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index aecc3e3dec0c..457b026a0972 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -980,6 +980,7 @@ int radeon_fbdev_init(struct radeon_device *rdev); | |||
980 | void radeon_fbdev_fini(struct radeon_device *rdev); | 980 | void radeon_fbdev_fini(struct radeon_device *rdev); |
981 | void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); | 981 | void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); |
982 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); | 982 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); |
983 | void radeon_fbdev_restore_mode(struct radeon_device *rdev); | ||
983 | 984 | ||
984 | void radeon_fb_output_poll_changed(struct radeon_device *rdev); | 985 | void radeon_fb_output_poll_changed(struct radeon_device *rdev); |
985 | 986 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 05751f3f8444..44489cce7458 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -1326,14 +1326,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev) | |||
1326 | INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); | 1326 | INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); |
1327 | 1327 | ||
1328 | if (rdev->pm.num_power_states > 1) { | 1328 | if (rdev->pm.num_power_states > 1) { |
1329 | /* where's the best place to put these? */ | ||
1330 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | ||
1331 | if (ret) | ||
1332 | DRM_ERROR("failed to create device file for power profile\n"); | ||
1333 | ret = device_create_file(rdev->dev, &dev_attr_power_method); | ||
1334 | if (ret) | ||
1335 | DRM_ERROR("failed to create device file for power method\n"); | ||
1336 | |||
1337 | if (radeon_debugfs_pm_init(rdev)) { | 1329 | if (radeon_debugfs_pm_init(rdev)) { |
1338 | DRM_ERROR("Failed to register debugfs file for PM!\n"); | 1330 | DRM_ERROR("Failed to register debugfs file for PM!\n"); |
1339 | } | 1331 | } |
@@ -1391,20 +1383,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) | |||
1391 | goto dpm_failed; | 1383 | goto dpm_failed; |
1392 | rdev->pm.dpm_enabled = true; | 1384 | rdev->pm.dpm_enabled = true; |
1393 | 1385 | ||
1394 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); | ||
1395 | if (ret) | ||
1396 | DRM_ERROR("failed to create device file for dpm state\n"); | ||
1397 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); | ||
1398 | if (ret) | ||
1399 | DRM_ERROR("failed to create device file for dpm state\n"); | ||
1400 | /* XXX: these are noops for dpm but are here for backwards compat */ | ||
1401 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | ||
1402 | if (ret) | ||
1403 | DRM_ERROR("failed to create device file for power profile\n"); | ||
1404 | ret = device_create_file(rdev->dev, &dev_attr_power_method); | ||
1405 | if (ret) | ||
1406 | DRM_ERROR("failed to create device file for power method\n"); | ||
1407 | |||
1408 | if (radeon_debugfs_pm_init(rdev)) { | 1386 | if (radeon_debugfs_pm_init(rdev)) { |
1409 | DRM_ERROR("Failed to register debugfs file for dpm!\n"); | 1387 | DRM_ERROR("Failed to register debugfs file for dpm!\n"); |
1410 | } | 1388 | } |
@@ -1545,9 +1523,44 @@ int radeon_pm_late_init(struct radeon_device *rdev) | |||
1545 | int ret = 0; | 1523 | int ret = 0; |
1546 | 1524 | ||
1547 | if (rdev->pm.pm_method == PM_METHOD_DPM) { | 1525 | if (rdev->pm.pm_method == PM_METHOD_DPM) { |
1548 | mutex_lock(&rdev->pm.mutex); | 1526 | if (rdev->pm.dpm_enabled) { |
1549 | ret = radeon_dpm_late_enable(rdev); | 1527 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); |
1550 | mutex_unlock(&rdev->pm.mutex); | 1528 | if (ret) |
1529 | DRM_ERROR("failed to create device file for dpm state\n"); | ||
1530 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); | ||
1531 | if (ret) | ||
1532 | DRM_ERROR("failed to create device file for dpm state\n"); | ||
1533 | /* XXX: these are noops for dpm but are here for backwards compat */ | ||
1534 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | ||
1535 | if (ret) | ||
1536 | DRM_ERROR("failed to create device file for power profile\n"); | ||
1537 | ret = device_create_file(rdev->dev, &dev_attr_power_method); | ||
1538 | if (ret) | ||
1539 | DRM_ERROR("failed to create device file for power method\n"); | ||
1540 | |||
1541 | mutex_lock(&rdev->pm.mutex); | ||
1542 | ret = radeon_dpm_late_enable(rdev); | ||
1543 | mutex_unlock(&rdev->pm.mutex); | ||
1544 | if (ret) { | ||
1545 | rdev->pm.dpm_enabled = false; | ||
1546 | DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); | ||
1547 | } else { | ||
1548 | /* set the dpm state for PX since there won't be | ||
1549 | * a modeset to call this. | ||
1550 | */ | ||
1551 | radeon_pm_compute_clocks(rdev); | ||
1552 | } | ||
1553 | } | ||
1554 | } else { | ||
1555 | if (rdev->pm.num_power_states > 1) { | ||
1556 | /* where's the best place to put these? */ | ||
1557 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | ||
1558 | if (ret) | ||
1559 | DRM_ERROR("failed to create device file for power profile\n"); | ||
1560 | ret = device_create_file(rdev->dev, &dev_attr_power_method); | ||
1561 | if (ret) | ||
1562 | DRM_ERROR("failed to create device file for power method\n"); | ||
1563 | } | ||
1551 | } | 1564 | } |
1552 | return ret; | 1565 | return ret; |
1553 | } | 1566 | } |
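The removed hunks and the rewritten radeon_pm_late_init() above move sysfs attribute creation out of early init: the power_dpm_* and legacy profile/method files are only created once the PM method is known to work, and for DPM a failed late enable flips dpm_enabled back off. A compact userspace sketch of that "expose the interface only after the feature is confirmed" ordering; the names are illustrative stand-ins, not the driver's actual API:

#include <stdbool.h>
#include <stdio.h>

static bool dpm_enabled = true;     /* set provisionally during early init */
static bool files_created;

static int dpm_late_enable(void) { return -1; }  /* pretend the enable fails */

static void pm_late_init(void)
{
    if (!dpm_enabled)
        return;
    files_created = true;           /* stands in for device_create_file() */
    if (dpm_late_enable()) {
        dpm_enabled = false;        /* mirrors "disabling dpm" in the hunk */
        printf("late enable failed, disabling dpm\n");
    }
}

int main(void)
{
    pm_late_init();
    printf("dpm=%d files=%d\n", dpm_enabled, files_created);
    return 0;
}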
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 787cd8fd897f..e72bf46042e0 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -2927,6 +2927,8 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { | |||
2927 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, | 2927 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, |
2928 | { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, | 2928 | { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, |
2929 | { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, | 2929 | { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, |
2930 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, | ||
2931 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, | ||
2930 | { 0, 0, 0, 0 }, | 2932 | { 0, 0, 0, 0 }, |
2931 | }; | 2933 | }; |
2932 | 2934 | ||
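The two added rows extend a sentinel-terminated quirk table keyed on PCI vendor/device and subsystem IDs, applying the same clock limits to two more board variants. A self-contained sketch of how such a table is typically walked; the struct layout and field names are illustrative, not the exact si_dpm definitions (0x1002 stands in for PCI_VENDOR_ID_ATI):

#include <stdint.h>
#include <stdio.h>

struct dpm_quirk {
    uint16_t vendor, device;
    uint16_t subvendor, subdevice;
    uint32_t max_sclk, max_mclk;    /* 0 means "no override" */
};

static const struct dpm_quirk quirk_list[] = {
    { 0x1002, 0x6811, 0x1762, 0x2015, 0, 120000 },
    { 0x1002, 0x6811, 0x1043, 0x2015, 0, 120000 },
    { 0, 0, 0, 0, 0, 0 },           /* all-zero sentinel terminates the walk */
};

static const struct dpm_quirk *find_quirk(uint16_t ven, uint16_t dev,
                                          uint16_t sven, uint16_t sdev)
{
    const struct dpm_quirk *p = quirk_list;

    while (p->vendor) {
        if (p->vendor == ven && p->device == dev &&
            p->subvendor == sven && p->subdevice == sdev)
            return p;
        ++p;
    }
    return NULL;
}

int main(void)
{
    const struct dpm_quirk *q = find_quirk(0x1002, 0x6811, 0x1043, 0x2015);

    if (q)
        printf("clock caps: sclk=%u mclk=%u\n", q->max_sclk, q->max_mclk);
    return 0;
}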
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 8d9b7de25613..745e996d2dbc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
882 | if (ret) | 882 | if (ret) |
883 | return ret; | 883 | return ret; |
884 | man = &bdev->man[mem_type]; | 884 | man = &bdev->man[mem_type]; |
885 | if (!man->has_type || !man->use_type) | ||
886 | continue; | ||
885 | 887 | ||
886 | type_ok = ttm_bo_mt_compatible(man, mem_type, place, | 888 | type_ok = ttm_bo_mt_compatible(man, mem_type, place, |
887 | &cur_flags); | 889 | &cur_flags); |
@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
889 | if (!type_ok) | 891 | if (!type_ok) |
890 | continue; | 892 | continue; |
891 | 893 | ||
894 | type_found = true; | ||
892 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | 895 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
893 | cur_flags); | 896 | cur_flags); |
894 | /* | 897 | /* |
@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
901 | if (mem_type == TTM_PL_SYSTEM) | 904 | if (mem_type == TTM_PL_SYSTEM) |
902 | break; | 905 | break; |
903 | 906 | ||
904 | if (man->has_type && man->use_type) { | 907 | ret = (*man->func->get_node)(man, bo, place, mem); |
905 | type_found = true; | 908 | if (unlikely(ret)) |
906 | ret = (*man->func->get_node)(man, bo, place, mem); | 909 | return ret; |
907 | if (unlikely(ret)) | 910 | |
908 | return ret; | ||
909 | } | ||
910 | if (mem->mm_node) | 911 | if (mem->mm_node) |
911 | break; | 912 | break; |
912 | } | 913 | } |
@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
917 | return 0; | 918 | return 0; |
918 | } | 919 | } |
919 | 920 | ||
920 | if (!type_found) | ||
921 | return -EINVAL; | ||
922 | |||
923 | for (i = 0; i < placement->num_busy_placement; ++i) { | 921 | for (i = 0; i < placement->num_busy_placement; ++i) { |
924 | const struct ttm_place *place = &placement->busy_placement[i]; | 922 | const struct ttm_place *place = &placement->busy_placement[i]; |
925 | 923 | ||
@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
927 | if (ret) | 925 | if (ret) |
928 | return ret; | 926 | return ret; |
929 | man = &bdev->man[mem_type]; | 927 | man = &bdev->man[mem_type]; |
930 | if (!man->has_type) | 928 | if (!man->has_type || !man->use_type) |
931 | continue; | 929 | continue; |
932 | if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) | 930 | if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) |
933 | continue; | 931 | continue; |
934 | 932 | ||
933 | type_found = true; | ||
935 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | 934 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
936 | cur_flags); | 935 | cur_flags); |
937 | /* | 936 | /* |
@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
957 | if (ret == -ERESTARTSYS) | 956 | if (ret == -ERESTARTSYS) |
958 | has_erestartsys = true; | 957 | has_erestartsys = true; |
959 | } | 958 | } |
960 | ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; | 959 | |
961 | return ret; | 960 | if (!type_found) { |
961 | printk(KERN_ERR TTM_PFX "No compatible memory type found.\n"); | ||
962 | return -EINVAL; | ||
963 | } | ||
964 | |||
965 | return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; | ||
962 | } | 966 | } |
963 | EXPORT_SYMBOL(ttm_bo_mem_space); | 967 | EXPORT_SYMBOL(ttm_bo_mem_space); |
964 | 968 | ||
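The reworked ttm_bo_mem_space() sets type_found only for placements whose memory manager is both registered and enabled, so it can tell apart "no usable memory type at all" (now reported with -EINVAL and a log line) from "usable types exist but every allocation failed" (-ENOMEM, or -ERESTARTSYS if a wait was interrupted). A compact sketch of that two-outcome search with the allocator stubbed out:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct mem_type {
    bool has_type, use_type;        /* registered and currently enabled */
};

/* Stand-in for the real allocator; pretend every pool is exhausted. */
static int try_alloc(int type)
{
    (void)type;
    return -ENOMEM;
}

static int mem_space(const struct mem_type *types, const int *placements, int n)
{
    bool type_found = false;
    int i, ret;

    for (i = 0; i < n; i++) {
        const struct mem_type *man = &types[placements[i]];

        if (!man->has_type || !man->use_type)
            continue;               /* skip disabled managers */
        type_found = true;          /* at least one usable type exists */
        ret = try_alloc(placements[i]);
        if (ret == 0)
            return 0;
    }

    if (!type_found)
        return -EINVAL;   /* nothing we could ever satisfy */
    return -ENOMEM;       /* types exist, memory is simply exhausted */
}

int main(void)
{
    struct mem_type types[2] = { { true, true }, { false, false } };
    int placement[1] = { 1 };

    printf("%d\n", mem_space(types, placement, 1));   /* -EINVAL */
    return 0;
}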
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index db8b49101a8b..512263919282 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c | |||
@@ -34,8 +34,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data) | |||
34 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 34 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
35 | struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; | 35 | struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; |
36 | 36 | ||
37 | seq_printf(m, "fence %ld %lld\n", | 37 | seq_printf(m, "fence %llu %lld\n", |
38 | atomic64_read(&vgdev->fence_drv.last_seq), | 38 | (u64)atomic64_read(&vgdev->fence_drv.last_seq), |
39 | vgdev->fence_drv.sync_seq); | 39 | vgdev->fence_drv.sync_seq); |
40 | return 0; | 40 | return 0; |
41 | } | 41 | } |
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index 1da632631dac..67097c9ce9c1 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c | |||
@@ -61,7 +61,7 @@ static void virtio_timeline_value_str(struct fence *f, char *str, int size) | |||
61 | { | 61 | { |
62 | struct virtio_gpu_fence *fence = to_virtio_fence(f); | 62 | struct virtio_gpu_fence *fence = to_virtio_fence(f); |
63 | 63 | ||
64 | snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq)); | 64 | snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); |
65 | } | 65 | } |
66 | 66 | ||
67 | static const struct fence_ops virtio_fence_ops = { | 67 | static const struct fence_ops virtio_fence_ops = { |
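Both virtio-gpu hunks fix printf-style format mismatches: atomic64_read() does not return the same type on every architecture (long on most 64-bit builds, long long in the 32-bit generic implementation), so the portable idiom is to cast the value to a fixed 64-bit type and print it with %llu. The same issue exists in plain C with int64_t, as this sketch shows:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t seq = 1234567890123LL;

    /* "%ld" would be wrong wherever int64_t is not a plain long. */
    printf("fence %llu\n", (unsigned long long)seq);  /* portable cast */
    printf("fence %" PRIu64 "\n", (uint64_t)seq);     /* or the <inttypes.h> macros */
    return 0;
}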
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index 67720f70fe29..b49445df8a7e 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config DRM_VMWGFX | 1 | config DRM_VMWGFX |
2 | tristate "DRM driver for VMware Virtual GPU" | 2 | tristate "DRM driver for VMware Virtual GPU" |
3 | depends on DRM && PCI | 3 | depends on DRM && PCI && X86 |
4 | select FB_DEFERRED_IO | 4 | select FB_DEFERRED_IO |
5 | select FB_CFB_FILLRECT | 5 | select FB_CFB_FILLRECT |
6 | select FB_CFB_COPYAREA | 6 | select FB_CFB_COPYAREA |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 5ae8f921da2a..8a76821177a6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | |||
@@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, | |||
681 | 0, 0, | 681 | 0, 0, |
682 | DRM_MM_SEARCH_DEFAULT, | 682 | DRM_MM_SEARCH_DEFAULT, |
683 | DRM_MM_CREATE_DEFAULT); | 683 | DRM_MM_CREATE_DEFAULT); |
684 | if (ret) { | ||
685 | (void) vmw_cmdbuf_man_process(man); | ||
686 | ret = drm_mm_insert_node_generic(&man->mm, info->node, | ||
687 | info->page_size, 0, 0, | ||
688 | DRM_MM_SEARCH_DEFAULT, | ||
689 | DRM_MM_CREATE_DEFAULT); | ||
690 | } | ||
691 | |||
684 | spin_unlock_bh(&man->lock); | 692 | spin_unlock_bh(&man->lock); |
685 | info->done = !ret; | 693 | info->done = !ret; |
686 | 694 | ||
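The added block gives vmw_cmdbuf_try_alloc() a second chance: if the first drm_mm_insert_node_generic() fails, vmw_cmdbuf_man_process() is run to retire finished command buffers and reclaim their space, and the insertion is retried once. The general "allocate, reclaim on failure, retry" shape, with stand-in names:

#include <errno.h>
#include <stdio.h>

static int space_left = 0;

static int insert_node(int size)
{
    if (size > space_left)
        return -ENOSPC;
    space_left -= size;
    return 0;
}

/* Stand-in for processing completed work and reclaiming its space. */
static void process_completed(void)
{
    space_left += 64;
}

static int try_alloc(int size)
{
    int ret = insert_node(size);

    if (ret) {                      /* first attempt failed: reclaim, retry once */
        process_completed();
        ret = insert_node(size);
    }
    return ret;
}

int main(void)
{
    printf("%d\n", try_alloc(32));  /* succeeds only after the reclaim */
    return 0;
}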
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index ce659a125f2b..092ea81eeff7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | |||
@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res, | |||
311 | struct vmw_private *dev_priv = res->dev_priv; | 311 | struct vmw_private *dev_priv = res->dev_priv; |
312 | struct ttm_buffer_object *bo = val_buf->bo; | 312 | struct ttm_buffer_object *bo = val_buf->bo; |
313 | struct vmw_fence_obj *fence; | 313 | struct vmw_fence_obj *fence; |
314 | int ret; | ||
315 | 314 | ||
316 | if (list_empty(&res->mob_head)) | 315 | if (list_empty(&res->mob_head)) |
317 | return 0; | 316 | return 0; |
@@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res, | |||
328 | if (likely(fence != NULL)) | 327 | if (likely(fence != NULL)) |
329 | vmw_fence_obj_unreference(&fence); | 328 | vmw_fence_obj_unreference(&fence); |
330 | 329 | ||
331 | return ret; | 330 | return 0; |
332 | } | 331 | } |
333 | 332 | ||
334 | /** | 333 | /** |
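The vmw_cotable_unbind() change removes a local ret that was returned without ever being assigned; reading an uninitialized automatic variable is undefined behavior and usually surfaces as a -Wmaybe-uninitialized warning. The vmw_kms_helper_dirty() hunk further down fixes the same class of bug by returning -ENOMEM explicitly. A minimal before/after illustration with stand-in function bodies:

#include <stdio.h>

/* Before the fix: 'ret' is returned without ever being assigned. */
static int unbind_buggy(int list_empty)
{
    int ret;

    if (list_empty)
        return 0;
    /* ... issue readback, sync, attach fence ... */
    return ret;         /* undefined: reads an uninitialized variable */
}

/* After the fix: the success path simply returns 0. */
static int unbind_fixed(int list_empty)
{
    if (list_empty)
        return 0;
    /* ... issue readback, sync, attach fence ... */
    return 0;
}

int main(void)
{
    printf("%d %d\n", unbind_buggy(1), unbind_fixed(0));
    return 0;
}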
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e13b20bd9908..2c7a25c71af2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
752 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 752 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
753 | dev_priv->active_master = &dev_priv->fbdev_master; | 753 | dev_priv->active_master = &dev_priv->fbdev_master; |
754 | 754 | ||
755 | 755 | dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start, | |
756 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, | 756 | dev_priv->mmio_size); |
757 | dev_priv->mmio_size); | ||
758 | |||
759 | dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, | ||
760 | dev_priv->mmio_size); | ||
761 | 757 | ||
762 | if (unlikely(dev_priv->mmio_virt == NULL)) { | 758 | if (unlikely(dev_priv->mmio_virt == NULL)) { |
763 | ret = -ENOMEM; | 759 | ret = -ENOMEM; |
@@ -913,7 +909,6 @@ out_no_device: | |||
913 | out_err4: | 909 | out_err4: |
914 | iounmap(dev_priv->mmio_virt); | 910 | iounmap(dev_priv->mmio_virt); |
915 | out_err3: | 911 | out_err3: |
916 | arch_phys_wc_del(dev_priv->mmio_mtrr); | ||
917 | vmw_ttm_global_release(dev_priv); | 912 | vmw_ttm_global_release(dev_priv); |
918 | out_err0: | 913 | out_err0: |
919 | for (i = vmw_res_context; i < vmw_res_max; ++i) | 914 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
964 | 959 | ||
965 | ttm_object_device_release(&dev_priv->tdev); | 960 | ttm_object_device_release(&dev_priv->tdev); |
966 | iounmap(dev_priv->mmio_virt); | 961 | iounmap(dev_priv->mmio_virt); |
967 | arch_phys_wc_del(dev_priv->mmio_mtrr); | ||
968 | if (dev_priv->ctx.staged_bindings) | 962 | if (dev_priv->ctx.staged_bindings) |
969 | vmw_binding_state_free(dev_priv->ctx.staged_bindings); | 963 | vmw_binding_state_free(dev_priv->ctx.staged_bindings); |
970 | vmw_ttm_global_release(dev_priv); | 964 | vmw_ttm_global_release(dev_priv); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 6d02de6dc36c..f19fd39b43e1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -376,7 +376,6 @@ struct vmw_private { | |||
376 | uint32_t initial_width; | 376 | uint32_t initial_width; |
377 | uint32_t initial_height; | 377 | uint32_t initial_height; |
378 | u32 __iomem *mmio_virt; | 378 | u32 __iomem *mmio_virt; |
379 | int mmio_mtrr; | ||
380 | uint32_t capabilities; | 379 | uint32_t capabilities; |
381 | uint32_t max_gmr_ids; | 380 | uint32_t max_gmr_ids; |
382 | uint32_t max_gmr_pages; | 381 | uint32_t max_gmr_pages; |
@@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
631 | uint32_t size, | 630 | uint32_t size, |
632 | bool shareable, | 631 | bool shareable, |
633 | uint32_t *handle, | 632 | uint32_t *handle, |
634 | struct vmw_dma_buffer **p_dma_buf); | 633 | struct vmw_dma_buffer **p_dma_buf, |
634 | struct ttm_base_object **p_base); | ||
635 | extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | 635 | extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, |
636 | struct vmw_dma_buffer *dma_buf, | 636 | struct vmw_dma_buffer *dma_buf, |
637 | uint32_t *handle); | 637 | uint32_t *handle); |
@@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | |||
645 | uint32_t cur_validate_node); | 645 | uint32_t cur_validate_node); |
646 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); | 646 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); |
647 | extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | 647 | extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
648 | uint32_t id, struct vmw_dma_buffer **out); | 648 | uint32_t id, struct vmw_dma_buffer **out, |
649 | struct ttm_base_object **base); | ||
649 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | 650 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, |
650 | struct drm_file *file_priv); | 651 | struct drm_file *file_priv); |
651 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | 652 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index b56565457c96..5da5de0cb522 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
1236 | struct vmw_relocation *reloc; | 1236 | struct vmw_relocation *reloc; |
1237 | int ret; | 1237 | int ret; |
1238 | 1238 | ||
1239 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 1239 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, |
1240 | NULL); | ||
1240 | if (unlikely(ret != 0)) { | 1241 | if (unlikely(ret != 0)) { |
1241 | DRM_ERROR("Could not find or use MOB buffer.\n"); | 1242 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
1242 | ret = -EINVAL; | 1243 | ret = -EINVAL; |
@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
1296 | struct vmw_relocation *reloc; | 1297 | struct vmw_relocation *reloc; |
1297 | int ret; | 1298 | int ret; |
1298 | 1299 | ||
1299 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 1300 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, |
1301 | NULL); | ||
1300 | if (unlikely(ret != 0)) { | 1302 | if (unlikely(ret != 0)) { |
1301 | DRM_ERROR("Could not find or use GMR region.\n"); | 1303 | DRM_ERROR("Could not find or use GMR region.\n"); |
1302 | ret = -EINVAL; | 1304 | ret = -EINVAL; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 61fb7f3de311..15a6c01cd016 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, | |||
1685 | struct drm_crtc *crtc; | 1685 | struct drm_crtc *crtc; |
1686 | u32 num_units = 0; | 1686 | u32 num_units = 0; |
1687 | u32 i, k; | 1687 | u32 i, k; |
1688 | int ret; | ||
1689 | 1688 | ||
1690 | dirty->dev_priv = dev_priv; | 1689 | dirty->dev_priv = dev_priv; |
1691 | 1690 | ||
@@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, | |||
1711 | if (!dirty->cmd) { | 1710 | if (!dirty->cmd) { |
1712 | DRM_ERROR("Couldn't reserve fifo space " | 1711 | DRM_ERROR("Couldn't reserve fifo space " |
1713 | "for dirty blits.\n"); | 1712 | "for dirty blits.\n"); |
1714 | return ret; | 1713 | return -ENOMEM; |
1715 | } | 1714 | } |
1716 | memset(dirty->cmd, 0, dirty->fifo_reserve_size); | 1715 | memset(dirty->cmd, 0, dirty->fifo_reserve_size); |
1717 | } | 1716 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 76069f093ccf..222c9c2123a1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, | |||
484 | goto out_unlock; | 484 | goto out_unlock; |
485 | } | 485 | } |
486 | 486 | ||
487 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf); | 487 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL); |
488 | if (ret) | 488 | if (ret) |
489 | goto out_unlock; | 489 | goto out_unlock; |
490 | 490 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c1912f852b42..e57667ca7557 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, | |||
354 | } | 354 | } |
355 | 355 | ||
356 | *out_surf = NULL; | 356 | *out_surf = NULL; |
357 | ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); | 357 | ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL); |
358 | return ret; | 358 | return ret; |
359 | } | 359 | } |
360 | 360 | ||
@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
481 | uint32_t size, | 481 | uint32_t size, |
482 | bool shareable, | 482 | bool shareable, |
483 | uint32_t *handle, | 483 | uint32_t *handle, |
484 | struct vmw_dma_buffer **p_dma_buf) | 484 | struct vmw_dma_buffer **p_dma_buf, |
485 | struct ttm_base_object **p_base) | ||
485 | { | 486 | { |
486 | struct vmw_user_dma_buffer *user_bo; | 487 | struct vmw_user_dma_buffer *user_bo; |
487 | struct ttm_buffer_object *tmp; | 488 | struct ttm_buffer_object *tmp; |
@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
515 | } | 516 | } |
516 | 517 | ||
517 | *p_dma_buf = &user_bo->dma; | 518 | *p_dma_buf = &user_bo->dma; |
519 | if (p_base) { | ||
520 | *p_base = &user_bo->prime.base; | ||
521 | kref_get(&(*p_base)->refcount); | ||
522 | } | ||
518 | *handle = user_bo->prime.base.hash.key; | 523 | *handle = user_bo->prime.base.hash.key; |
519 | 524 | ||
520 | out_no_base_object: | 525 | out_no_base_object: |
@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | |||
631 | struct vmw_dma_buffer *dma_buf; | 636 | struct vmw_dma_buffer *dma_buf; |
632 | struct vmw_user_dma_buffer *user_bo; | 637 | struct vmw_user_dma_buffer *user_bo; |
633 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 638 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
639 | struct ttm_base_object *buffer_base; | ||
634 | int ret; | 640 | int ret; |
635 | 641 | ||
636 | if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 | 642 | if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 |
@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | |||
643 | 649 | ||
644 | switch (arg->op) { | 650 | switch (arg->op) { |
645 | case drm_vmw_synccpu_grab: | 651 | case drm_vmw_synccpu_grab: |
646 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); | 652 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf, |
653 | &buffer_base); | ||
647 | if (unlikely(ret != 0)) | 654 | if (unlikely(ret != 0)) |
648 | return ret; | 655 | return ret; |
649 | 656 | ||
@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | |||
651 | dma); | 658 | dma); |
652 | ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); | 659 | ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); |
653 | vmw_dmabuf_unreference(&dma_buf); | 660 | vmw_dmabuf_unreference(&dma_buf); |
661 | ttm_base_object_unref(&buffer_base); | ||
654 | if (unlikely(ret != 0 && ret != -ERESTARTSYS && | 662 | if (unlikely(ret != 0 && ret != -ERESTARTSYS && |
655 | ret != -EBUSY)) { | 663 | ret != -EBUSY)) { |
656 | DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", | 664 | DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", |
@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
692 | return ret; | 700 | return ret; |
693 | 701 | ||
694 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, | 702 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
695 | req->size, false, &handle, &dma_buf); | 703 | req->size, false, &handle, &dma_buf, |
704 | NULL); | ||
696 | if (unlikely(ret != 0)) | 705 | if (unlikely(ret != 0)) |
697 | goto out_no_dmabuf; | 706 | goto out_no_dmabuf; |
698 | 707 | ||
@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | |||
721 | } | 730 | } |
722 | 731 | ||
723 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | 732 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
724 | uint32_t handle, struct vmw_dma_buffer **out) | 733 | uint32_t handle, struct vmw_dma_buffer **out, |
734 | struct ttm_base_object **p_base) | ||
725 | { | 735 | { |
726 | struct vmw_user_dma_buffer *vmw_user_bo; | 736 | struct vmw_user_dma_buffer *vmw_user_bo; |
727 | struct ttm_base_object *base; | 737 | struct ttm_base_object *base; |
@@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | |||
743 | vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, | 753 | vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, |
744 | prime.base); | 754 | prime.base); |
745 | (void)ttm_bo_reference(&vmw_user_bo->dma.base); | 755 | (void)ttm_bo_reference(&vmw_user_bo->dma.base); |
746 | ttm_base_object_unref(&base); | 756 | if (p_base) |
757 | *p_base = base; | ||
758 | else | ||
759 | ttm_base_object_unref(&base); | ||
747 | *out = &vmw_user_bo->dma; | 760 | *out = &vmw_user_bo->dma; |
748 | 761 | ||
749 | return 0; | 762 | return 0; |
@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv, | |||
1004 | 1017 | ||
1005 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, | 1018 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
1006 | args->size, false, &args->handle, | 1019 | args->size, false, &args->handle, |
1007 | &dma_buf); | 1020 | &dma_buf, NULL); |
1008 | if (unlikely(ret != 0)) | 1021 | if (unlikely(ret != 0)) |
1009 | goto out_no_dmabuf; | 1022 | goto out_no_dmabuf; |
1010 | 1023 | ||
@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv, | |||
1032 | struct vmw_dma_buffer *out_buf; | 1045 | struct vmw_dma_buffer *out_buf; |
1033 | int ret; | 1046 | int ret; |
1034 | 1047 | ||
1035 | ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf); | 1048 | ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL); |
1036 | if (ret != 0) | 1049 | if (ret != 0) |
1037 | return -EINVAL; | 1050 | return -EINVAL; |
1038 | 1051 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index bba1ee395478..fd47547b0234 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, | |||
855 | 855 | ||
856 | if (buffer_handle != SVGA3D_INVALID_ID) { | 856 | if (buffer_handle != SVGA3D_INVALID_ID) { |
857 | ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, | 857 | ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, |
858 | &buffer); | 858 | &buffer, NULL); |
859 | if (unlikely(ret != 0)) { | 859 | if (unlikely(ret != 0)) { |
860 | DRM_ERROR("Could not find buffer for shader " | 860 | DRM_ERROR("Could not find buffer for shader " |
861 | "creation.\n"); | 861 | "creation.\n"); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 3361769842f4..03f63c749c02 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -46,6 +46,7 @@ struct vmw_user_surface { | |||
46 | struct vmw_surface srf; | 46 | struct vmw_surface srf; |
47 | uint32_t size; | 47 | uint32_t size; |
48 | struct drm_master *master; | 48 | struct drm_master *master; |
49 | struct ttm_base_object *backup_base; | ||
49 | }; | 50 | }; |
50 | 51 | ||
51 | /** | 52 | /** |
@@ -656,6 +657,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) | |||
656 | struct vmw_resource *res = &user_srf->srf.res; | 657 | struct vmw_resource *res = &user_srf->srf.res; |
657 | 658 | ||
658 | *p_base = NULL; | 659 | *p_base = NULL; |
660 | if (user_srf->backup_base) | ||
661 | ttm_base_object_unref(&user_srf->backup_base); | ||
659 | vmw_resource_unreference(&res); | 662 | vmw_resource_unreference(&res); |
660 | } | 663 | } |
661 | 664 | ||
@@ -851,7 +854,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
851 | res->backup_size, | 854 | res->backup_size, |
852 | true, | 855 | true, |
853 | &backup_handle, | 856 | &backup_handle, |
854 | &res->backup); | 857 | &res->backup, |
858 | &user_srf->backup_base); | ||
855 | if (unlikely(ret != 0)) { | 859 | if (unlikely(ret != 0)) { |
856 | vmw_resource_unreference(&res); | 860 | vmw_resource_unreference(&res); |
857 | goto out_unlock; | 861 | goto out_unlock; |
@@ -1321,7 +1325,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1321 | 1325 | ||
1322 | if (req->buffer_handle != SVGA3D_INVALID_ID) { | 1326 | if (req->buffer_handle != SVGA3D_INVALID_ID) { |
1323 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, | 1327 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, |
1324 | &res->backup); | 1328 | &res->backup, |
1329 | &user_srf->backup_base); | ||
1325 | if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < | 1330 | if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < |
1326 | res->backup_size) { | 1331 | res->backup_size) { |
1327 | DRM_ERROR("Surface backup buffer is too small.\n"); | 1332 | DRM_ERROR("Surface backup buffer is too small.\n"); |
@@ -1335,7 +1340,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1335 | req->drm_surface_flags & | 1340 | req->drm_surface_flags & |
1336 | drm_vmw_surface_flag_shareable, | 1341 | drm_vmw_surface_flag_shareable, |
1337 | &backup_handle, | 1342 | &backup_handle, |
1338 | &res->backup); | 1343 | &res->backup, |
1344 | &user_srf->backup_base); | ||
1339 | 1345 | ||
1340 | if (unlikely(ret != 0)) { | 1346 | if (unlikely(ret != 0)) { |
1341 | vmw_resource_unreference(&res); | 1347 | vmw_resource_unreference(&res); |
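The vmwgfx changes above thread an optional struct ttm_base_object ** through vmw_user_dmabuf_lookup() and vmw_user_dmabuf_alloc(): when the caller passes a non-NULL pointer it receives the base object together with an extra reference, which it must drop later (directly with ttm_base_object_unref(), or via the surface's backup_base in vmw_user_surface_base_release()). A small reference-counting sketch of that "lookup may transfer a second reference" contract, using toy types rather than the real ttm structures:

#include <stdio.h>

struct base_object {
    int refcount;
};

static void base_ref(struct base_object *b)   { b->refcount++; }
static void base_unref(struct base_object *b) { b->refcount--; }

/*
 * Look up an object. If p_base is non-NULL the caller also gets the base
 * object back and owns one extra reference on it; otherwise the temporary
 * reference taken for the lookup is dropped before returning.
 */
static int lookup(struct base_object *obj, struct base_object **p_base)
{
    base_ref(obj);                  /* reference held for the lookup itself */
    if (p_base)
        *p_base = obj;              /* transfer that reference to the caller */
    else
        base_unref(obj);
    return 0;
}

int main(void)
{
    struct base_object obj = { .refcount = 1 };
    struct base_object *base = NULL;

    lookup(&obj, &base);            /* caller keeps a reference ... */
    printf("after lookup: %d\n", obj.refcount);   /* 2 */
    base_unref(base);               /* ... and must drop it when done */
    printf("after unref:  %d\n", obj.refcount);   /* 1 */
    return 0;
}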
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 243f99a80253..e5a38d202a21 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -912,7 +912,7 @@ static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs) | |||
912 | } | 912 | } |
913 | } | 913 | } |
914 | 914 | ||
915 | static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc) | 915 | static void ipu_irq_handler(struct irq_desc *desc) |
916 | { | 916 | { |
917 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); | 917 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); |
918 | struct irq_chip *chip = irq_desc_get_chip(desc); | 918 | struct irq_chip *chip = irq_desc_get_chip(desc); |
@@ -925,7 +925,7 @@ static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
925 | chained_irq_exit(chip, desc); | 925 | chained_irq_exit(chip, desc); |
926 | } | 926 | } |
927 | 927 | ||
928 | static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc) | 928 | static void ipu_err_irq_handler(struct irq_desc *desc) |
929 | { | 929 | { |
930 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); | 930 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); |
931 | struct irq_chip *chip = irq_desc_get_chip(desc); | 931 | struct irq_chip *chip = irq_desc_get_chip(desc); |
@@ -1099,8 +1099,7 @@ static int ipu_irq_init(struct ipu_soc *ipu) | |||
1099 | } | 1099 | } |
1100 | 1100 | ||
1101 | ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU", | 1101 | ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU", |
1102 | handle_level_irq, 0, | 1102 | handle_level_irq, 0, 0, 0); |
1103 | IRQF_VALID, 0); | ||
1104 | if (ret < 0) { | 1103 | if (ret < 0) { |
1105 | dev_err(ipu->dev, "failed to alloc generic irq chips\n"); | 1104 | dev_err(ipu->dev, "failed to alloc generic irq chips\n"); |
1106 | irq_domain_remove(ipu->domain); | 1105 | irq_domain_remove(ipu->domain); |
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 2f9aead4ecfc..652afd11a9ef 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c | |||
@@ -204,6 +204,8 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | |||
204 | spin_lock_irqsave(&vmbus_connection.channel_lock, flags); | 204 | spin_lock_irqsave(&vmbus_connection.channel_lock, flags); |
205 | list_del(&channel->listentry); | 205 | list_del(&channel->listentry); |
206 | spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); | 206 | spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); |
207 | |||
208 | primary_channel = channel; | ||
207 | } else { | 209 | } else { |
208 | primary_channel = channel->primary_channel; | 210 | primary_channel = channel->primary_channel; |
209 | spin_lock_irqsave(&primary_channel->lock, flags); | 211 | spin_lock_irqsave(&primary_channel->lock, flags); |
@@ -211,6 +213,14 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | |||
211 | primary_channel->num_sc--; | 213 | primary_channel->num_sc--; |
212 | spin_unlock_irqrestore(&primary_channel->lock, flags); | 214 | spin_unlock_irqrestore(&primary_channel->lock, flags); |
213 | } | 215 | } |
216 | |||
217 | /* | ||
218 | * We need to free the bit for init_vp_index() to work in the case | ||
219 | * of sub-channel, when we reload drivers like hv_netvsc. | ||
220 | */ | ||
221 | cpumask_clear_cpu(channel->target_cpu, | ||
222 | &primary_channel->alloced_cpus_in_node); | ||
223 | |||
214 | free_channel(channel); | 224 | free_channel(channel); |
215 | } | 225 | } |
216 | 226 | ||
@@ -458,6 +468,13 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui | |||
458 | continue; | 468 | continue; |
459 | } | 469 | } |
460 | 470 | ||
471 | /* | ||
472 | * NOTE: in the case of sub-channel, we clear the sub-channel | ||
473 | * related bit(s) in primary->alloced_cpus_in_node in | ||
474 | * hv_process_channel_removal(), so when we reload drivers | ||
475 | * like hv_netvsc in SMP guest, here we're able to re-allocate | ||
476 | * bit from primary->alloced_cpus_in_node. | ||
477 | */ | ||
461 | if (!cpumask_test_cpu(cur_cpu, | 478 | if (!cpumask_test_cpu(cur_cpu, |
462 | &primary->alloced_cpus_in_node)) { | 479 | &primary->alloced_cpus_in_node)) { |
463 | cpumask_set_cpu(cur_cpu, | 480 | cpumask_set_cpu(cur_cpu, |
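The removal-path hunk clears the channel's bit in primary->alloced_cpus_in_node so init_vp_index() can hand the same CPU out again when a driver such as hv_netvsc is reloaded; without the clear the bitmap only ever fills up. A small sketch of the allocate-from-bitmap / free-back-to-bitmap pairing (plain C, not the kernel cpumask API):

#include <stdint.h>
#include <stdio.h>

static uint32_t alloced_cpus;       /* one bit per CPU in the node */

static int alloc_cpu(unsigned int ncpus)
{
    unsigned int cpu;

    for (cpu = 0; cpu < ncpus; cpu++) {
        if (!(alloced_cpus & (1u << cpu))) {
            alloced_cpus |= 1u << cpu;
            return (int)cpu;
        }
    }
    return -1;                      /* every CPU already taken */
}

/* Must be called on channel removal, or reloads can never reuse the CPU. */
static void free_cpu(int cpu)
{
    alloced_cpus &= ~(1u << cpu);
}

int main(void)
{
    int a = alloc_cpu(4), b = alloc_cpu(4);

    free_cpu(a);                    /* mirror hv_process_channel_removal() */
    printf("%d %d %d\n", a, b, alloc_cpu(4));   /* 0 1 0: bit 0 is reused */
    return 0;
}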
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 500b262b89bb..e13c902e8966 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -1140,8 +1140,8 @@ config SENSORS_NCT6775 | |||
1140 | help | 1140 | help |
1141 | If you say yes here you get support for the hardware monitoring | 1141 | If you say yes here you get support for the hardware monitoring |
1142 | functionality of the Nuvoton NCT6106D, NCT6775F, NCT6776F, NCT6779D, | 1142 | functionality of the Nuvoton NCT6106D, NCT6775F, NCT6776F, NCT6779D, |
1143 | NCT6791D, NCT6792D and compatible Super-I/O chips. This driver | 1143 | NCT6791D, NCT6792D, NCT6793D, and compatible Super-I/O chips. This |
1144 | replaces the w83627ehf driver for NCT6775F and NCT6776F. | 1144 | driver replaces the w83627ehf driver for NCT6775F and NCT6776F. |
1145 | 1145 | ||
1146 | This driver can also be built as a module. If so, the module | 1146 | This driver can also be built as a module. If so, the module |
1147 | will be called nct6775. | 1147 | will be called nct6775. |
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c index 6cb89c0ebab6..1fd46859ed29 100644 --- a/drivers/hwmon/abx500.c +++ b/drivers/hwmon/abx500.c | |||
@@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = { | |||
470 | { .compatible = "stericsson,abx500-temp" }, | 470 | { .compatible = "stericsson,abx500-temp" }, |
471 | {}, | 471 | {}, |
472 | }; | 472 | }; |
473 | MODULE_DEVICE_TABLE(of, abx500_temp_match); | ||
473 | #endif | 474 | #endif |
474 | 475 | ||
475 | static struct platform_driver abx500_temp_driver = { | 476 | static struct platform_driver abx500_temp_driver = { |
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index a3dae6d0082a..82de3deeb18a 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c | |||
@@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = { | |||
539 | { .compatible = "gpio-fan", }, | 539 | { .compatible = "gpio-fan", }, |
540 | {}, | 540 | {}, |
541 | }; | 541 | }; |
542 | MODULE_DEVICE_TABLE(of, of_gpio_fan_match); | ||
542 | #endif /* CONFIG_OF_GPIO */ | 543 | #endif /* CONFIG_OF_GPIO */ |
543 | 544 | ||
544 | static int gpio_fan_probe(struct platform_device *pdev) | 545 | static int gpio_fan_probe(struct platform_device *pdev) |
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index bd1c99deac71..8b4fa55e46c6 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
@@ -39,6 +39,7 @@ | |||
39 | * nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3 | 39 | * nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3 |
40 | * nct6791d 15 6 6 2+6 0xc800 0xc1 0x5ca3 | 40 | * nct6791d 15 6 6 2+6 0xc800 0xc1 0x5ca3 |
41 | * nct6792d 15 6 6 2+6 0xc910 0xc1 0x5ca3 | 41 | * nct6792d 15 6 6 2+6 0xc910 0xc1 0x5ca3 |
42 | * nct6793d 15 6 6 2+6 0xd120 0xc1 0x5ca3 | ||
42 | * | 43 | * |
43 | * #temp lists the number of monitored temperature sources (first value) plus | 44 | * #temp lists the number of monitored temperature sources (first value) plus |
44 | * the number of directly connectable temperature sensors (second value). | 45 | * the number of directly connectable temperature sensors (second value). |
@@ -63,7 +64,7 @@ | |||
63 | 64 | ||
64 | #define USE_ALTERNATE | 65 | #define USE_ALTERNATE |
65 | 66 | ||
66 | enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792 }; | 67 | enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793 }; |
67 | 68 | ||
68 | /* used to set data->name = nct6775_device_names[data->sio_kind] */ | 69 | /* used to set data->name = nct6775_device_names[data->sio_kind] */ |
69 | static const char * const nct6775_device_names[] = { | 70 | static const char * const nct6775_device_names[] = { |
@@ -73,6 +74,17 @@ static const char * const nct6775_device_names[] = { | |||
73 | "nct6779", | 74 | "nct6779", |
74 | "nct6791", | 75 | "nct6791", |
75 | "nct6792", | 76 | "nct6792", |
77 | "nct6793", | ||
78 | }; | ||
79 | |||
80 | static const char * const nct6775_sio_names[] __initconst = { | ||
81 | "NCT6106D", | ||
82 | "NCT6775F", | ||
83 | "NCT6776D/F", | ||
84 | "NCT6779D", | ||
85 | "NCT6791D", | ||
86 | "NCT6792D", | ||
87 | "NCT6793D", | ||
76 | }; | 88 | }; |
77 | 89 | ||
78 | static unsigned short force_id; | 90 | static unsigned short force_id; |
@@ -104,6 +116,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); | |||
104 | #define SIO_NCT6779_ID 0xc560 | 116 | #define SIO_NCT6779_ID 0xc560 |
105 | #define SIO_NCT6791_ID 0xc800 | 117 | #define SIO_NCT6791_ID 0xc800 |
106 | #define SIO_NCT6792_ID 0xc910 | 118 | #define SIO_NCT6792_ID 0xc910 |
119 | #define SIO_NCT6793_ID 0xd120 | ||
107 | #define SIO_ID_MASK 0xFFF0 | 120 | #define SIO_ID_MASK 0xFFF0 |
108 | 121 | ||
109 | enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; | 122 | enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; |
@@ -354,6 +367,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1] | |||
354 | 367 | ||
355 | /* NCT6776 specific data */ | 368 | /* NCT6776 specific data */ |
356 | 369 | ||
370 | /* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */ | ||
371 | #define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME | ||
372 | #define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME | ||
373 | |||
357 | static const s8 NCT6776_ALARM_BITS[] = { | 374 | static const s8 NCT6776_ALARM_BITS[] = { |
358 | 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */ | 375 | 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */ |
359 | 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */ | 376 | 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */ |
@@ -533,7 +550,7 @@ static const s8 NCT6791_ALARM_BITS[] = { | |||
533 | 4, 5, 13, -1, -1, -1, /* temp1..temp6 */ | 550 | 4, 5, 13, -1, -1, -1, /* temp1..temp6 */ |
534 | 12, 9 }; /* intrusion0, intrusion1 */ | 551 | 12, 9 }; /* intrusion0, intrusion1 */ |
535 | 552 | ||
536 | /* NCT6792 specific data */ | 553 | /* NCT6792/NCT6793 specific data */ |
537 | 554 | ||
538 | static const u16 NCT6792_REG_TEMP_MON[] = { | 555 | static const u16 NCT6792_REG_TEMP_MON[] = { |
539 | 0x73, 0x75, 0x77, 0x79, 0x7b, 0x7d }; | 556 | 0x73, 0x75, 0x77, 0x79, 0x7b, 0x7d }; |
@@ -1056,6 +1073,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg) | |||
1056 | case nct6779: | 1073 | case nct6779: |
1057 | case nct6791: | 1074 | case nct6791: |
1058 | case nct6792: | 1075 | case nct6792: |
1076 | case nct6793: | ||
1059 | return reg == 0x150 || reg == 0x153 || reg == 0x155 || | 1077 | return reg == 0x150 || reg == 0x153 || reg == 0x155 || |
1060 | ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) || | 1078 | ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) || |
1061 | reg == 0x402 || | 1079 | reg == 0x402 || |
@@ -1407,6 +1425,7 @@ static void nct6775_update_pwm_limits(struct device *dev) | |||
1407 | case nct6779: | 1425 | case nct6779: |
1408 | case nct6791: | 1426 | case nct6791: |
1409 | case nct6792: | 1427 | case nct6792: |
1428 | case nct6793: | ||
1410 | reg = nct6775_read_value(data, | 1429 | reg = nct6775_read_value(data, |
1411 | data->REG_CRITICAL_PWM_ENABLE[i]); | 1430 | data->REG_CRITICAL_PWM_ENABLE[i]); |
1412 | if (reg & data->CRITICAL_PWM_ENABLE_MASK) | 1431 | if (reg & data->CRITICAL_PWM_ENABLE_MASK) |
@@ -2822,6 +2841,7 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr, | |||
2822 | case nct6779: | 2841 | case nct6779: |
2823 | case nct6791: | 2842 | case nct6791: |
2824 | case nct6792: | 2843 | case nct6792: |
2844 | case nct6793: | ||
2825 | nct6775_write_value(data, data->REG_CRITICAL_PWM[nr], | 2845 | nct6775_write_value(data, data->REG_CRITICAL_PWM[nr], |
2826 | val); | 2846 | val); |
2827 | reg = nct6775_read_value(data, | 2847 | reg = nct6775_read_value(data, |
@@ -3256,7 +3276,7 @@ nct6775_check_fan_inputs(struct nct6775_data *data) | |||
3256 | pwm4pin = false; | 3276 | pwm4pin = false; |
3257 | pwm5pin = false; | 3277 | pwm5pin = false; |
3258 | pwm6pin = false; | 3278 | pwm6pin = false; |
3259 | } else { /* NCT6779D, NCT6791D, or NCT6792D */ | 3279 | } else { /* NCT6779D, NCT6791D, NCT6792D, or NCT6793D */ |
3260 | regval = superio_inb(sioreg, 0x1c); | 3280 | regval = superio_inb(sioreg, 0x1c); |
3261 | 3281 | ||
3262 | fan3pin = !(regval & (1 << 5)); | 3282 | fan3pin = !(regval & (1 << 5)); |
@@ -3269,7 +3289,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data) | |||
3269 | 3289 | ||
3270 | fan4min = fan4pin; | 3290 | fan4min = fan4pin; |
3271 | 3291 | ||
3272 | if (data->kind == nct6791 || data->kind == nct6792) { | 3292 | if (data->kind == nct6791 || data->kind == nct6792 || |
3293 | data->kind == nct6793) { | ||
3273 | regval = superio_inb(sioreg, 0x2d); | 3294 | regval = superio_inb(sioreg, 0x2d); |
3274 | fan6pin = (regval & (1 << 1)); | 3295 | fan6pin = (regval & (1 << 1)); |
3275 | pwm6pin = (regval & (1 << 0)); | 3296 | pwm6pin = (regval & (1 << 0)); |
@@ -3528,8 +3549,8 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3528 | data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES; | 3549 | data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES; |
3529 | data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; | 3550 | data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; |
3530 | data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; | 3551 | data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; |
3531 | data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; | 3552 | data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME; |
3532 | data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; | 3553 | data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME; |
3533 | data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; | 3554 | data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; |
3534 | data->REG_PWM[0] = NCT6775_REG_PWM; | 3555 | data->REG_PWM[0] = NCT6775_REG_PWM; |
3535 | data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; | 3556 | data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; |
@@ -3600,8 +3621,8 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3600 | data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES; | 3621 | data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES; |
3601 | data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; | 3622 | data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; |
3602 | data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; | 3623 | data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; |
3603 | data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; | 3624 | data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME; |
3604 | data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; | 3625 | data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME; |
3605 | data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; | 3626 | data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; |
3606 | data->REG_PWM[0] = NCT6775_REG_PWM; | 3627 | data->REG_PWM[0] = NCT6775_REG_PWM; |
3607 | data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; | 3628 | data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; |
@@ -3643,6 +3664,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3643 | break; | 3664 | break; |
3644 | case nct6791: | 3665 | case nct6791: |
3645 | case nct6792: | 3666 | case nct6792: |
3667 | case nct6793: | ||
3646 | data->in_num = 15; | 3668 | data->in_num = 15; |
3647 | data->pwm_num = 6; | 3669 | data->pwm_num = 6; |
3648 | data->auto_pwm_num = 4; | 3670 | data->auto_pwm_num = 4; |
@@ -3677,8 +3699,8 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3677 | data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES; | 3699 | data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES; |
3678 | data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; | 3700 | data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; |
3679 | data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; | 3701 | data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; |
3680 | data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; | 3702 | data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME; |
3681 | data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; | 3703 | data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME; |
3682 | data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; | 3704 | data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; |
3683 | data->REG_PWM[0] = NCT6775_REG_PWM; | 3705 | data->REG_PWM[0] = NCT6775_REG_PWM; |
3684 | data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; | 3706 | data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; |
@@ -3918,6 +3940,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3918 | case nct6779: | 3940 | case nct6779: |
3919 | case nct6791: | 3941 | case nct6791: |
3920 | case nct6792: | 3942 | case nct6792: |
3943 | case nct6793: | ||
3921 | break; | 3944 | break; |
3922 | } | 3945 | } |
3923 | 3946 | ||
@@ -3950,6 +3973,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3950 | break; | 3973 | break; |
3951 | case nct6791: | 3974 | case nct6791: |
3952 | case nct6792: | 3975 | case nct6792: |
3976 | case nct6793: | ||
3953 | tmp |= 0x7e; | 3977 | tmp |= 0x7e; |
3954 | break; | 3978 | break; |
3955 | } | 3979 | } |
@@ -4047,7 +4071,8 @@ static int __maybe_unused nct6775_resume(struct device *dev) | |||
4047 | if (reg != data->sio_reg_enable) | 4071 | if (reg != data->sio_reg_enable) |
4048 | superio_outb(sioreg, SIO_REG_ENABLE, data->sio_reg_enable); | 4072 | superio_outb(sioreg, SIO_REG_ENABLE, data->sio_reg_enable); |
4049 | 4073 | ||
4050 | if (data->kind == nct6791 || data->kind == nct6792) | 4074 | if (data->kind == nct6791 || data->kind == nct6792 || |
4075 | data->kind == nct6793) | ||
4051 | nct6791_enable_io_mapping(sioreg); | 4076 | nct6791_enable_io_mapping(sioreg); |
4052 | 4077 | ||
4053 | superio_exit(sioreg); | 4078 | superio_exit(sioreg); |
@@ -4106,15 +4131,6 @@ static struct platform_driver nct6775_driver = { | |||
4106 | .probe = nct6775_probe, | 4131 | .probe = nct6775_probe, |
4107 | }; | 4132 | }; |
4108 | 4133 | ||
4109 | static const char * const nct6775_sio_names[] __initconst = { | ||
4110 | "NCT6106D", | ||
4111 | "NCT6775F", | ||
4112 | "NCT6776D/F", | ||
4113 | "NCT6779D", | ||
4114 | "NCT6791D", | ||
4115 | "NCT6792D", | ||
4116 | }; | ||
4117 | |||
4118 | /* nct6775_find() looks for a '627 in the Super-I/O config space */ | 4134 | /* nct6775_find() looks for a '627 in the Super-I/O config space */ |
4119 | static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) | 4135 | static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) |
4120 | { | 4136 | { |
@@ -4150,6 +4166,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) | |||
4150 | case SIO_NCT6792_ID: | 4166 | case SIO_NCT6792_ID: |
4151 | sio_data->kind = nct6792; | 4167 | sio_data->kind = nct6792; |
4152 | break; | 4168 | break; |
4169 | case SIO_NCT6793_ID: | ||
4170 | sio_data->kind = nct6793; | ||
4171 | break; | ||
4153 | default: | 4172 | default: |
4154 | if (val != 0xffff) | 4173 | if (val != 0xffff) |
4155 | pr_debug("unsupported chip ID: 0x%04x\n", val); | 4174 | pr_debug("unsupported chip ID: 0x%04x\n", val); |
@@ -4175,7 +4194,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) | |||
4175 | superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01); | 4194 | superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01); |
4176 | } | 4195 | } |
4177 | 4196 | ||
4178 | if (sio_data->kind == nct6791 || sio_data->kind == nct6792) | 4197 | if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || |
4198 | sio_data->kind == nct6793) | ||
4179 | nct6791_enable_io_mapping(sioaddr); | 4199 | nct6791_enable_io_mapping(sioaddr); |
4180 | 4200 | ||
4181 | superio_exit(sioaddr); | 4201 | superio_exit(sioaddr); |
@@ -4285,7 +4305,7 @@ static void __exit sensors_nct6775_exit(void) | |||
4285 | } | 4305 | } |
4286 | 4306 | ||
4287 | MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); | 4307 | MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); |
4288 | MODULE_DESCRIPTION("NCT6106D/NCT6775F/NCT6776F/NCT6779D/NCT6791D/NCT6792D driver"); | 4308 | MODULE_DESCRIPTION("Driver for NCT6775F and compatible chips"); |
4289 | MODULE_LICENSE("GPL"); | 4309 | MODULE_LICENSE("GPL"); |
4290 | 4310 | ||
4291 | module_init(sensors_nct6775_init); | 4311 | module_init(sensors_nct6775_init); |
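Supporting the NCT6793D means touching several pieces of state that must stay in step: the kinds enum, the nct6775_device_names[] array indexed by that enum, the chip-ID switch in nct6775_find(), and every switch (data->kind) ladder; the nct6775_sio_names[] __initconst table is also moved up next to the others so a new entry is harder to forget. A condensed sketch of the enum-plus-parallel-tables pattern, keeping only the last three chips and the IDs taken from the hunk above:

#include <stdio.h>

enum kinds { nct6791, nct6792, nct6793, nkinds };

/* Parallel tables indexed by enum kinds; both must grow with the enum. */
static const char *const device_names[nkinds] = { "nct6791", "nct6792", "nct6793" };
static const char *const sio_names[nkinds]    = { "NCT6791D", "NCT6792D", "NCT6793D" };

#define SIO_NCT6791_ID 0xc800
#define SIO_NCT6792_ID 0xc910
#define SIO_NCT6793_ID 0xd120
#define SIO_ID_MASK    0xfff0

static int detect(unsigned int val, enum kinds *kind)
{
    switch (val & SIO_ID_MASK) {
    case SIO_NCT6791_ID: *kind = nct6791; return 0;
    case SIO_NCT6792_ID: *kind = nct6792; return 0;
    case SIO_NCT6793_ID: *kind = nct6793; return 0;   /* the new entry */
    default:             return -1;
    }
}

int main(void)
{
    enum kinds kind;

    if (detect(0xd121, &kind) == 0)
        printf("found %s (%s)\n", sio_names[kind], device_names[kind]);
    return 0;
}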
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 2d9a712699ff..3e23003f78b0 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c | |||
@@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = { | |||
323 | { .compatible = "pwm-fan", }, | 323 | { .compatible = "pwm-fan", }, |
324 | {}, | 324 | {}, |
325 | }; | 325 | }; |
326 | MODULE_DEVICE_TABLE(of, of_pwm_fan_match); | ||
326 | 327 | ||
327 | static struct platform_driver pwm_fan_driver = { | 328 | static struct platform_driver pwm_fan_driver = { |
328 | .probe = pwm_fan_probe, | 329 | .probe = pwm_fan_probe, |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 3dd2de31a2f8..472b88285c75 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
27 | #include <linux/dmi.h> | ||
27 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
28 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
29 | #include <linux/clk-provider.h> | 30 | #include <linux/clk-provider.h> |
@@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) | |||
51 | } | 52 | } |
52 | 53 | ||
53 | #ifdef CONFIG_ACPI | 54 | #ifdef CONFIG_ACPI |
55 | /* | ||
56 | * The HCNT/LCNT information coming from ACPI should be the most accurate | ||
57 | * for given platform. However, some systems get it wrong. On such systems | ||
58 | * we get better results by calculating those based on the input clock. | ||
59 | */ | ||
60 | static const struct dmi_system_id dw_i2c_no_acpi_params[] = { | ||
61 | { | ||
62 | .ident = "Dell Inspiron 7348", | ||
63 | .matches = { | ||
64 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
65 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"), | ||
66 | }, | ||
67 | }, | ||
68 | { } | ||
69 | }; | ||
70 | |||
54 | static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], | 71 | static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], |
55 | u16 *hcnt, u16 *lcnt, u32 *sda_hold) | 72 | u16 *hcnt, u16 *lcnt, u32 *sda_hold) |
56 | { | 73 | { |
@@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], | |||
58 | acpi_handle handle = ACPI_HANDLE(&pdev->dev); | 75 | acpi_handle handle = ACPI_HANDLE(&pdev->dev); |
59 | union acpi_object *obj; | 76 | union acpi_object *obj; |
60 | 77 | ||
78 | if (dmi_check_system(dw_i2c_no_acpi_params)) | ||
79 | return; | ||
80 | |||
61 | if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) | 81 | if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) |
62 | return; | 82 | return; |
63 | 83 | ||
@@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev) | |||
253 | adap->dev.parent = &pdev->dev; | 273 | adap->dev.parent = &pdev->dev; |
254 | adap->dev.of_node = pdev->dev.of_node; | 274 | adap->dev.of_node = pdev->dev.of_node; |
255 | 275 | ||
256 | r = i2c_add_numbered_adapter(adap); | ||
257 | if (r) { | ||
258 | dev_err(&pdev->dev, "failure adding adapter\n"); | ||
259 | return r; | ||
260 | } | ||
261 | |||
262 | if (dev->pm_runtime_disabled) { | 276 | if (dev->pm_runtime_disabled) { |
263 | pm_runtime_forbid(&pdev->dev); | 277 | pm_runtime_forbid(&pdev->dev); |
264 | } else { | 278 | } else { |
@@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev) | |||
268 | pm_runtime_enable(&pdev->dev); | 282 | pm_runtime_enable(&pdev->dev); |
269 | } | 283 | } |
270 | 284 | ||
285 | r = i2c_add_numbered_adapter(adap); | ||
286 | if (r) { | ||
287 | dev_err(&pdev->dev, "failure adding adapter\n"); | ||
288 | pm_runtime_disable(&pdev->dev); | ||
289 | return r; | ||
290 | } | ||
291 | |||
271 | return 0; | 292 | return 0; |
272 | } | 293 | } |
273 | 294 | ||
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d8361dada584..d8b5a8fee1e6 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
@@ -690,15 +690,16 @@ static int rcar_i2c_probe(struct platform_device *pdev) | |||
690 | return ret; | 690 | return ret; |
691 | } | 691 | } |
692 | 692 | ||
693 | pm_runtime_enable(dev); | ||
694 | platform_set_drvdata(pdev, priv); | ||
695 | |||
693 | ret = i2c_add_numbered_adapter(adap); | 696 | ret = i2c_add_numbered_adapter(adap); |
694 | if (ret < 0) { | 697 | if (ret < 0) { |
695 | dev_err(dev, "reg adap failed: %d\n", ret); | 698 | dev_err(dev, "reg adap failed: %d\n", ret); |
699 | pm_runtime_disable(dev); | ||
696 | return ret; | 700 | return ret; |
697 | } | 701 | } |
698 | 702 | ||
699 | pm_runtime_enable(dev); | ||
700 | platform_set_drvdata(pdev, priv); | ||
701 | |||
702 | dev_info(dev, "probed\n"); | 703 | dev_info(dev, "probed\n"); |
703 | 704 | ||
704 | return 0; | 705 | return 0; |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 50bfd8cef5f2..5df819610d52 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1243 | i2c->adap.nr = i2c->pdata->bus_num; | 1243 | i2c->adap.nr = i2c->pdata->bus_num; |
1244 | i2c->adap.dev.of_node = pdev->dev.of_node; | 1244 | i2c->adap.dev.of_node = pdev->dev.of_node; |
1245 | 1245 | ||
1246 | platform_set_drvdata(pdev, i2c); | ||
1247 | |||
1248 | pm_runtime_enable(&pdev->dev); | ||
1249 | |||
1246 | ret = i2c_add_numbered_adapter(&i2c->adap); | 1250 | ret = i2c_add_numbered_adapter(&i2c->adap); |
1247 | if (ret < 0) { | 1251 | if (ret < 0) { |
1248 | dev_err(&pdev->dev, "failed to add bus to i2c core\n"); | 1252 | dev_err(&pdev->dev, "failed to add bus to i2c core\n"); |
1253 | pm_runtime_disable(&pdev->dev); | ||
1249 | s3c24xx_i2c_deregister_cpufreq(i2c); | 1254 | s3c24xx_i2c_deregister_cpufreq(i2c); |
1250 | clk_unprepare(i2c->clk); | 1255 | clk_unprepare(i2c->clk); |
1251 | return ret; | 1256 | return ret; |
1252 | } | 1257 | } |
1253 | 1258 | ||
1254 | platform_set_drvdata(pdev, i2c); | ||
1255 | |||
1256 | pm_runtime_enable(&pdev->dev); | ||
1257 | pm_runtime_enable(&i2c->adap.dev); | 1259 | pm_runtime_enable(&i2c->adap.dev); |
1258 | 1260 | ||
1259 | dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev)); | 1261 | dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev)); |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 5f89f1e3c2f2..a59c3111f7fb 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -694,12 +694,12 @@ static int i2c_device_probe(struct device *dev) | |||
694 | goto err_clear_wakeup_irq; | 694 | goto err_clear_wakeup_irq; |
695 | 695 | ||
696 | status = dev_pm_domain_attach(&client->dev, true); | 696 | status = dev_pm_domain_attach(&client->dev, true); |
697 | if (status != -EPROBE_DEFER) { | 697 | if (status == -EPROBE_DEFER) |
698 | status = driver->probe(client, i2c_match_id(driver->id_table, | 698 | goto err_clear_wakeup_irq; |
699 | client)); | 699 | |
700 | if (status) | 700 | status = driver->probe(client, i2c_match_id(driver->id_table, client)); |
701 | goto err_detach_pm_domain; | 701 | if (status) |
702 | } | 702 | goto err_detach_pm_domain; |
703 | 703 | ||
704 | return 0; | 704 | return 0; |
705 | 705 | ||
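The i2c_device_probe() rework replaces the nested "if (status != -EPROBE_DEFER) { ... }" with an early goto, so every failure path leaves through the shared unwind labels at the end of the function. The idiom in miniature (EPROBE_DEFER is a kernel-internal errno value, defined locally just for this sketch):

#include <stdio.h>

#define EPROBE_DEFER 517            /* kernel-private errno, for this sketch only */

static int setup_wakeup_irq(void)  { return 0; }
static void clear_wakeup_irq(void) { puts("wakeup irq cleared"); }
static int attach_pm_domain(void)  { return -EPROBE_DEFER; }
static void detach_pm_domain(void) { puts("pm domain detached"); }
static int do_probe(void)          { return 0; }

static int device_probe(void)
{
    int status;

    status = setup_wakeup_irq();
    if (status)
        return status;

    status = attach_pm_domain();
    if (status == -EPROBE_DEFER)
        goto err_clear_wakeup_irq;  /* early exit joins the unwind ladder */

    status = do_probe();
    if (status)
        goto err_detach_pm_domain;

    return 0;

err_detach_pm_domain:
    detach_pm_domain();
err_clear_wakeup_irq:
    clear_wakeup_irq();
    return status;
}

int main(void)
{
    printf("probe status: %d\n", device_probe());
    return 0;
}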
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3a3738fe016b..cd4510a63375 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = { | |||
620 | .name = "C6-SKL", | 620 | .name = "C6-SKL", |
621 | .desc = "MWAIT 0x20", | 621 | .desc = "MWAIT 0x20", |
622 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, | 622 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, |
623 | .exit_latency = 75, | 623 | .exit_latency = 85, |
624 | .target_residency = 200, | 624 | .target_residency = 200, |
625 | .enter = &intel_idle, | 625 | .enter = &intel_idle, |
626 | .enter_freeze = intel_idle_freeze, }, | 626 | .enter_freeze = intel_idle_freeze, }, |
@@ -636,11 +636,19 @@ static struct cpuidle_state skl_cstates[] = { | |||
636 | .name = "C8-SKL", | 636 | .name = "C8-SKL", |
637 | .desc = "MWAIT 0x40", | 637 | .desc = "MWAIT 0x40", |
638 | .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, | 638 | .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, |
639 | .exit_latency = 174, | 639 | .exit_latency = 200, |
640 | .target_residency = 800, | 640 | .target_residency = 800, |
641 | .enter = &intel_idle, | 641 | .enter = &intel_idle, |
642 | .enter_freeze = intel_idle_freeze, }, | 642 | .enter_freeze = intel_idle_freeze, }, |
643 | { | 643 | { |
644 | .name = "C9-SKL", | ||
645 | .desc = "MWAIT 0x50", | ||
646 | .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, | ||
647 | .exit_latency = 480, | ||
648 | .target_residency = 5000, | ||
649 | .enter = &intel_idle, | ||
650 | .enter_freeze = intel_idle_freeze, }, | ||
651 | { | ||
644 | .name = "C10-SKL", | 652 | .name = "C10-SKL", |
645 | .desc = "MWAIT 0x60", | 653 | .desc = "MWAIT 0x60", |
646 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, | 654 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, |
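The Skylake table update raises the C6 and C8 exit latencies and adds a C9-SKL entry (MWAIT hint 0x50). exit_latency and target_residency are the two numbers the cpuidle governor weighs: a state is only worth entering when the CPU is expected to stay idle for at least target_residency and the current latency budget tolerates exit_latency. A simplified, self-contained illustration of that selection (the values mirror the SKL entries above; the loop is an illustration, not the menu governor):

    #include <stdio.h>

    struct state {
        const char *name;
        unsigned exit_latency;      /* us */
        unsigned target_residency;  /* us */
    };

    static const struct state skl[] = {
        { "C6-SKL", 85,  200  },
        { "C8-SKL", 200, 800  },
        { "C9-SKL", 480, 5000 },
    };

    /* deepest state whose residency fits the prediction and whose latency fits the limit */
    static const struct state *pick(unsigned predicted_us, unsigned latency_limit_us)
    {
        const struct state *best = NULL;
        size_t i;

        for (i = 0; i < sizeof(skl) / sizeof(skl[0]); i++)
            if (skl[i].target_residency <= predicted_us &&
                skl[i].exit_latency <= latency_limit_us)
                best = &skl[i];
        return best;
    }

    int main(void)
    {
        const struct state *s = pick(1000, 300);

        printf("%s\n", s ? s->name : "poll/shallow");  /* prints C8-SKL */
        return 0;
    }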
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index da4c6979fbb8..aa26f3c3416b 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig | |||
@@ -56,7 +56,6 @@ config INFINIBAND_ADDR_TRANS | |||
56 | 56 | ||
57 | source "drivers/infiniband/hw/mthca/Kconfig" | 57 | source "drivers/infiniband/hw/mthca/Kconfig" |
58 | source "drivers/infiniband/hw/qib/Kconfig" | 58 | source "drivers/infiniband/hw/qib/Kconfig" |
59 | source "drivers/infiniband/hw/ehca/Kconfig" | ||
60 | source "drivers/infiniband/hw/cxgb3/Kconfig" | 59 | source "drivers/infiniband/hw/cxgb3/Kconfig" |
61 | source "drivers/infiniband/hw/cxgb4/Kconfig" | 60 | source "drivers/infiniband/hw/cxgb4/Kconfig" |
62 | source "drivers/infiniband/hw/mlx4/Kconfig" | 61 | source "drivers/infiniband/hw/mlx4/Kconfig" |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index b1ab13f3e182..59a2dafc8c57 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -1232,14 +1232,32 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv, | |||
1232 | return true; | 1232 | return true; |
1233 | } | 1233 | } |
1234 | 1234 | ||
1235 | static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num) | ||
1236 | { | ||
1237 | enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num); | ||
1238 | enum rdma_transport_type transport = | ||
1239 | rdma_node_get_transport(device->node_type); | ||
1240 | |||
1241 | return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB; | ||
1242 | } | ||
1243 | |||
1244 | static bool cma_protocol_roce(const struct rdma_cm_id *id) | ||
1245 | { | ||
1246 | struct ib_device *device = id->device; | ||
1247 | const int port_num = id->port_num ?: rdma_start_port(device); | ||
1248 | |||
1249 | return cma_protocol_roce_dev_port(device, port_num); | ||
1250 | } | ||
1251 | |||
1235 | static bool cma_match_net_dev(const struct rdma_id_private *id_priv, | 1252 | static bool cma_match_net_dev(const struct rdma_id_private *id_priv, |
1236 | const struct net_device *net_dev) | 1253 | const struct net_device *net_dev) |
1237 | { | 1254 | { |
1238 | const struct rdma_addr *addr = &id_priv->id.route.addr; | 1255 | const struct rdma_addr *addr = &id_priv->id.route.addr; |
1239 | 1256 | ||
1240 | if (!net_dev) | 1257 | if (!net_dev) |
1241 | /* This request is an AF_IB request */ | 1258 | /* This request is an AF_IB request or a RoCE request */ |
1242 | return addr->src_addr.ss_family == AF_IB; | 1259 | return addr->src_addr.ss_family == AF_IB || |
1260 | cma_protocol_roce(&id_priv->id); | ||
1243 | 1261 | ||
1244 | return !addr->dev_addr.bound_dev_if || | 1262 | return !addr->dev_addr.bound_dev_if || |
1245 | (net_eq(dev_net(net_dev), &init_net) && | 1263 | (net_eq(dev_net(net_dev), &init_net) && |
@@ -1294,6 +1312,10 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, | |||
1294 | if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { | 1312 | if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { |
1295 | /* Assuming the protocol is AF_IB */ | 1313 | /* Assuming the protocol is AF_IB */ |
1296 | *net_dev = NULL; | 1314 | *net_dev = NULL; |
1315 | } else if (cma_protocol_roce_dev_port(req.device, req.port)) { | ||
1316 | /* TODO find the net dev matching the request parameters | ||
1317 | * through the RoCE GID table */ | ||
1318 | *net_dev = NULL; | ||
1297 | } else { | 1319 | } else { |
1298 | return ERR_CAST(*net_dev); | 1320 | return ERR_CAST(*net_dev); |
1299 | } | 1321 | } |
@@ -1593,11 +1615,16 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, | |||
1593 | if (ret) | 1615 | if (ret) |
1594 | goto err; | 1616 | goto err; |
1595 | } else { | 1617 | } else { |
1596 | /* An AF_IB connection */ | 1618 | if (!cma_protocol_roce(listen_id) && |
1597 | WARN_ON_ONCE(ss_family != AF_IB); | 1619 | cma_any_addr(cma_src_addr(id_priv))) { |
1598 | 1620 | rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; | |
1599 | cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv), | 1621 | rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); |
1600 | &rt->addr.dev_addr); | 1622 | ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); |
1623 | } else if (!cma_any_addr(cma_src_addr(id_priv))) { | ||
1624 | ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); | ||
1625 | if (ret) | ||
1626 | goto err; | ||
1627 | } | ||
1601 | } | 1628 | } |
1602 | rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); | 1629 | rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); |
1603 | 1630 | ||
@@ -1635,13 +1662,12 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, | |||
1635 | if (ret) | 1662 | if (ret) |
1636 | goto err; | 1663 | goto err; |
1637 | } else { | 1664 | } else { |
1638 | /* An AF_IB connection */ | 1665 | if (!cma_any_addr(cma_src_addr(id_priv))) { |
1639 | WARN_ON_ONCE(ss_family != AF_IB); | 1666 | ret = cma_translate_addr(cma_src_addr(id_priv), |
1640 | 1667 | &id->route.addr.dev_addr); | |
1641 | if (!cma_any_addr(cma_src_addr(id_priv))) | 1668 | if (ret) |
1642 | cma_translate_ib((struct sockaddr_ib *) | 1669 | goto err; |
1643 | cma_src_addr(id_priv), | 1670 | } |
1644 | &id->route.addr.dev_addr); | ||
1645 | } | 1671 | } |
1646 | 1672 | ||
1647 | id_priv->state = RDMA_CM_CONNECT; | 1673 | id_priv->state = RDMA_CM_CONNECT; |
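The new cma_protocol_roce*() helpers classify a port as RoCE when the node's RDMA transport is IB but the port's link layer is Ethernet; such requests, like AF_IB ones, can arrive without a resolvable net_device, so cma_match_net_dev() now accepts them with *net_dev left NULL. A standalone sketch of that predicate (the enum names only loosely mirror the kernel's rdma_link_layer/rdma_transport_type and are stubs for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    enum link_layer { LINK_LAYER_INFINIBAND, LINK_LAYER_ETHERNET };
    enum transport  { TRANSPORT_IB, TRANSPORT_IWARP };

    /* RoCE: the IB transport carried over an Ethernet link layer */
    static bool is_roce(enum link_layer ll, enum transport tr)
    {
        return ll == LINK_LAYER_ETHERNET && tr == TRANSPORT_IB;
    }

    int main(void)
    {
        printf("native IB : %d\n", is_roce(LINK_LAYER_INFINIBAND, TRANSPORT_IB));    /* 0 */
        printf("RoCE      : %d\n", is_roce(LINK_LAYER_ETHERNET,   TRANSPORT_IB));    /* 1 */
        printf("iWARP     : %d\n", is_roce(LINK_LAYER_ETHERNET,   TRANSPORT_IWARP)); /* 0 */
        return 0;
    }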
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index 1bdb9996d371..aded2a5cc2d5 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile | |||
@@ -1,6 +1,5 @@ | |||
1 | obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/ | 1 | obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/ |
2 | obj-$(CONFIG_INFINIBAND_QIB) += qib/ | 2 | obj-$(CONFIG_INFINIBAND_QIB) += qib/ |
3 | obj-$(CONFIG_INFINIBAND_EHCA) += ehca/ | ||
4 | obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/ | 3 | obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/ |
5 | obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/ | 4 | obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/ |
6 | obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/ | 5 | obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/ |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 41d6911e244e..f1ccd40beae9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
245 | props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; | 245 | props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; |
246 | if (MLX5_CAP_GEN(mdev, apm)) | 246 | if (MLX5_CAP_GEN(mdev, apm)) |
247 | props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; | 247 | props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; |
248 | props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; | ||
249 | if (MLX5_CAP_GEN(mdev, xrc)) | 248 | if (MLX5_CAP_GEN(mdev, xrc)) |
250 | props->device_cap_flags |= IB_DEVICE_XRC; | 249 | props->device_cap_flags |= IB_DEVICE_XRC; |
251 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; | 250 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; |
@@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm | |||
795 | return 0; | 794 | return 0; |
796 | } | 795 | } |
797 | 796 | ||
798 | static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) | ||
799 | { | ||
800 | struct mlx5_create_mkey_mbox_in *in; | ||
801 | struct mlx5_mkey_seg *seg; | ||
802 | struct mlx5_core_mr mr; | ||
803 | int err; | ||
804 | |||
805 | in = kzalloc(sizeof(*in), GFP_KERNEL); | ||
806 | if (!in) | ||
807 | return -ENOMEM; | ||
808 | |||
809 | seg = &in->seg; | ||
810 | seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; | ||
811 | seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); | ||
812 | seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); | ||
813 | seg->start_addr = 0; | ||
814 | |||
815 | err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), | ||
816 | NULL, NULL, NULL); | ||
817 | if (err) { | ||
818 | mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); | ||
819 | goto err_in; | ||
820 | } | ||
821 | |||
822 | kfree(in); | ||
823 | *key = mr.key; | ||
824 | |||
825 | return 0; | ||
826 | |||
827 | err_in: | ||
828 | kfree(in); | ||
829 | |||
830 | return err; | ||
831 | } | ||
832 | |||
833 | static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) | ||
834 | { | ||
835 | struct mlx5_core_mr mr; | ||
836 | int err; | ||
837 | |||
838 | memset(&mr, 0, sizeof(mr)); | ||
839 | mr.key = key; | ||
840 | err = mlx5_core_destroy_mkey(dev->mdev, &mr); | ||
841 | if (err) | ||
842 | mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); | ||
843 | } | ||
844 | |||
845 | static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, | 797 | static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, |
846 | struct ib_ucontext *context, | 798 | struct ib_ucontext *context, |
847 | struct ib_udata *udata) | 799 | struct ib_udata *udata) |
@@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, | |||
867 | kfree(pd); | 819 | kfree(pd); |
868 | return ERR_PTR(-EFAULT); | 820 | return ERR_PTR(-EFAULT); |
869 | } | 821 | } |
870 | } else { | ||
871 | err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); | ||
872 | if (err) { | ||
873 | mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); | ||
874 | kfree(pd); | ||
875 | return ERR_PTR(err); | ||
876 | } | ||
877 | } | 822 | } |
878 | 823 | ||
879 | return &pd->ibpd; | 824 | return &pd->ibpd; |
@@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd) | |||
884 | struct mlx5_ib_dev *mdev = to_mdev(pd->device); | 829 | struct mlx5_ib_dev *mdev = to_mdev(pd->device); |
885 | struct mlx5_ib_pd *mpd = to_mpd(pd); | 830 | struct mlx5_ib_pd *mpd = to_mpd(pd); |
886 | 831 | ||
887 | if (!pd->uobject) | ||
888 | free_pa_mkey(mdev, mpd->pa_lkey); | ||
889 | |||
890 | mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); | 832 | mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); |
891 | kfree(mpd); | 833 | kfree(mpd); |
892 | 834 | ||
@@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) | |||
1245 | struct ib_srq_init_attr attr; | 1187 | struct ib_srq_init_attr attr; |
1246 | struct mlx5_ib_dev *dev; | 1188 | struct mlx5_ib_dev *dev; |
1247 | struct ib_cq_init_attr cq_attr = {.cqe = 1}; | 1189 | struct ib_cq_init_attr cq_attr = {.cqe = 1}; |
1248 | u32 rsvd_lkey; | ||
1249 | int ret = 0; | 1190 | int ret = 0; |
1250 | 1191 | ||
1251 | dev = container_of(devr, struct mlx5_ib_dev, devr); | 1192 | dev = container_of(devr, struct mlx5_ib_dev, devr); |
1252 | 1193 | ||
1253 | ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey); | ||
1254 | if (ret) { | ||
1255 | pr_err("Failed to query special context %d\n", ret); | ||
1256 | return ret; | ||
1257 | } | ||
1258 | dev->ib_dev.local_dma_lkey = rsvd_lkey; | ||
1259 | |||
1260 | devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); | 1194 | devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); |
1261 | if (IS_ERR(devr->p0)) { | 1195 | if (IS_ERR(devr->p0)) { |
1262 | ret = PTR_ERR(devr->p0); | 1196 | ret = PTR_ERR(devr->p0); |
@@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
1418 | strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); | 1352 | strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); |
1419 | dev->ib_dev.owner = THIS_MODULE; | 1353 | dev->ib_dev.owner = THIS_MODULE; |
1420 | dev->ib_dev.node_type = RDMA_NODE_IB_CA; | 1354 | dev->ib_dev.node_type = RDMA_NODE_IB_CA; |
1355 | dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; | ||
1421 | dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); | 1356 | dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); |
1422 | dev->ib_dev.phys_port_cnt = dev->num_ports; | 1357 | dev->ib_dev.phys_port_cnt = dev->num_ports; |
1423 | dev->ib_dev.num_comp_vectors = | 1358 | dev->ib_dev.num_comp_vectors = |
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index bb8cda79e881..22123b79d550 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte | |||
103 | struct mlx5_ib_pd { | 103 | struct mlx5_ib_pd { |
104 | struct ib_pd ibpd; | 104 | struct ib_pd ibpd; |
105 | u32 pdn; | 105 | u32 pdn; |
106 | u32 pa_lkey; | ||
107 | }; | 106 | }; |
108 | 107 | ||
109 | /* Use macros here so that don't have to duplicate | 108 | /* Use macros here so that don't have to duplicate |
@@ -213,7 +212,6 @@ struct mlx5_ib_qp { | |||
213 | int uuarn; | 212 | int uuarn; |
214 | 213 | ||
215 | int create_type; | 214 | int create_type; |
216 | u32 pa_lkey; | ||
217 | 215 | ||
218 | /* Store signature errors */ | 216 | /* Store signature errors */ |
219 | bool signature_en; | 217 | bool signature_en; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index c745c6c5e10d..6f521a3418e8 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
925 | err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); | 925 | err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); |
926 | if (err) | 926 | if (err) |
927 | mlx5_ib_dbg(dev, "err %d\n", err); | 927 | mlx5_ib_dbg(dev, "err %d\n", err); |
928 | else | ||
929 | qp->pa_lkey = to_mpd(pd)->pa_lkey; | ||
930 | } | 928 | } |
931 | 929 | ||
932 | if (err) | 930 | if (err) |
@@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, | |||
2045 | mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); | 2043 | mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); |
2046 | dseg->addr = cpu_to_be64(mfrpl->map); | 2044 | dseg->addr = cpu_to_be64(mfrpl->map); |
2047 | dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); | 2045 | dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); |
2048 | dseg->lkey = cpu_to_be32(pd->pa_lkey); | 2046 | dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); |
2049 | } | 2047 | } |
2050 | 2048 | ||
2051 | static __be32 send_ieth(struct ib_send_wr *wr) | 2049 | static __be32 send_ieth(struct ib_send_wr *wr) |
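Taken together, the mlx5 hunks drop the driver-private pa_lkey machinery (alloc_pa_mkey()/free_pa_mkey() and the reserved-lkey firmware query), stop advertising IB_DEVICE_LOCAL_DMA_LKEY, and make set_frwr_pages() use the lkey stored in the protection domain instead. A small sketch of the consumer-side pattern this relies on, under the assumption that the caller supplies a per-PD lkey whenever the device has no global one (query_caps()/alloc_dma_mr_lkey() are hypothetical stand-ins for the verbs calls, not the real API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct caps { bool global_dma_lkey; uint32_t lkey; };

    /* hypothetical stand-ins for querying device caps and registering a DMA MR */
    static struct caps query_caps(void)     { return (struct caps){ .global_dma_lkey = false }; }
    static uint32_t alloc_dma_mr_lkey(void) { return 0x1100; }

    int main(void)
    {
        struct caps c = query_caps();
        /* per-PD lkey: the device-wide key when advertised, otherwise the
         * key of a freshly registered DMA memory region */
        uint32_t pd_lkey = c.global_dma_lkey ? c.lkey : alloc_dma_mr_lkey();

        printf("pd local_dma_lkey = 0x%x\n", pd_lkey);
        return 0;
    }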
diff --git a/drivers/infiniband/hw/usnic/usnic.h b/drivers/infiniband/hw/usnic/usnic.h index 5be13d8991bc..f903502d3883 100644 --- a/drivers/infiniband/hw/usnic/usnic.h +++ b/drivers/infiniband/hw/usnic/usnic.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h index 04a66229584e..7fe9502ce8d3 100644 --- a/drivers/infiniband/hw/usnic/usnic_abi.h +++ b/drivers/infiniband/hw/usnic/usnic_abi.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h index 393567266142..596e0ed49a8e 100644 --- a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h +++ b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_common_util.h b/drivers/infiniband/hw/usnic/usnic_common_util.h index 9d737ed5e55d..b54986de5f0c 100644 --- a/drivers/infiniband/hw/usnic/usnic_common_util.h +++ b/drivers/infiniband/hw/usnic/usnic_common_util.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c index 5d13860161a4..5e55b8bc6fe4 100644 --- a/drivers/infiniband/hw/usnic/usnic_debugfs.c +++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.h b/drivers/infiniband/hw/usnic/usnic_debugfs.h index 4087d24a88f6..98453e91daa6 100644 --- a/drivers/infiniband/hw/usnic/usnic_debugfs.h +++ b/drivers/infiniband/hw/usnic/usnic_debugfs.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c index e3c9bd9d3ba3..3c37dd59c04e 100644 --- a/drivers/infiniband/hw/usnic/usnic_fwd.c +++ b/drivers/infiniband/hw/usnic/usnic_fwd.c | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h index 93713a2230b3..3a8add9ddf46 100644 --- a/drivers/infiniband/hw/usnic/usnic_fwd.h +++ b/drivers/infiniband/hw/usnic/usnic_fwd.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h index e5a9297dd1bd..525bf272671e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib.h +++ b/drivers/infiniband/hw/usnic/usnic_ib.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 34c49b8105fe..0c15bd885035 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c index db3588df3546..85dc3f989ff7 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h index b0aafe8db0c3..b1458be1d402 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c index 27dc67c1689f..3412ea06116e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h index 0d09b493cd02..3d98e16cfeaf 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 7df43827cb29..f8e3211689a3 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index 0bd04efa16f3..414eaa566bd9 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_log.h b/drivers/infiniband/hw/usnic/usnic_log.h index 75777a66c684..183fcb6a952f 100644 --- a/drivers/infiniband/hw/usnic/usnic_log.h +++ b/drivers/infiniband/hw/usnic/usnic_log.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c index ddef6f77a78c..de318389a301 100644 --- a/drivers/infiniband/hw/usnic/usnic_transport.c +++ b/drivers/infiniband/hw/usnic/usnic_transport.c | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.h b/drivers/infiniband/hw/usnic/usnic_transport.h index 7e5dc6d9f462..9a7a2d9755c0 100644 --- a/drivers/infiniband/hw/usnic/usnic_transport.h +++ b/drivers/infiniband/hw/usnic/usnic_transport.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index cb2337f0532b..645a5f6e6c88 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * licenses. You may choose to be licensed under the terms of the GNU | 7 | * licenses. You may choose to be licensed under the terms of the GNU |
8 | * General Public License (GPL) Version 2, available from the file | 8 | * General Public License (GPL) Version 2, available from the file |
9 | * COPYING in the main directory of this source tree, or the | 9 | * COPYING in the main directory of this source tree, or the |
10 | * OpenIB.org BSD license below: | 10 | * BSD license below: |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or | 12 | * Redistribution and use in source and binary forms, with or |
13 | * without modification, are permitted provided that the following | 13 | * without modification, are permitted provided that the following |
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h index 70440996e8f2..45ca7c1613a7 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c index 3a4288e0fbac..42b4b4c4e452 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h index d4f752e258fd..c0b0b876ab90 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c index 656b88c39eda..66de93fb8ea9 100644 --- a/drivers/infiniband/hw/usnic/usnic_vnic.c +++ b/drivers/infiniband/hw/usnic/usnic_vnic.c | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.h b/drivers/infiniband/hw/usnic/usnic_vnic.h index 14d931a8829d..a08423e478af 100644 --- a/drivers/infiniband/hw/usnic/usnic_vnic.h +++ b/drivers/infiniband/hw/usnic/usnic_vnic.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you may redistribute it and/or modify | 4 | * This software is available to you under a choice of one of two |
5 | * it under the terms of the GNU General Public License as published by | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * the Free Software Foundation; version 2 of the License. | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
7 | * | 22 | * |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ca2873698d75..edc5b8565d6d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -80,7 +80,7 @@ enum { | |||
80 | IPOIB_NUM_WC = 4, | 80 | IPOIB_NUM_WC = 4, |
81 | 81 | ||
82 | IPOIB_MAX_PATH_REC_QUEUE = 3, | 82 | IPOIB_MAX_PATH_REC_QUEUE = 3, |
83 | IPOIB_MAX_MCAST_QUEUE = 3, | 83 | IPOIB_MAX_MCAST_QUEUE = 64, |
84 | 84 | ||
85 | IPOIB_FLAG_OPER_UP = 0, | 85 | IPOIB_FLAG_OPER_UP = 0, |
86 | IPOIB_FLAG_INITIALIZED = 1, | 86 | IPOIB_FLAG_INITIALIZED = 1, |
@@ -495,6 +495,7 @@ void ipoib_dev_cleanup(struct net_device *dev); | |||
495 | void ipoib_mcast_join_task(struct work_struct *work); | 495 | void ipoib_mcast_join_task(struct work_struct *work); |
496 | void ipoib_mcast_carrier_on_task(struct work_struct *work); | 496 | void ipoib_mcast_carrier_on_task(struct work_struct *work); |
497 | void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); | 497 | void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); |
498 | void ipoib_mcast_free(struct ipoib_mcast *mc); | ||
498 | 499 | ||
499 | void ipoib_mcast_restart_task(struct work_struct *work); | 500 | void ipoib_mcast_restart_task(struct work_struct *work); |
500 | int ipoib_mcast_start_thread(struct net_device *dev); | 501 | int ipoib_mcast_start_thread(struct net_device *dev); |
@@ -548,6 +549,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter, | |||
548 | 549 | ||
549 | int ipoib_mcast_attach(struct net_device *dev, u16 mlid, | 550 | int ipoib_mcast_attach(struct net_device *dev, u16 mlid, |
550 | union ib_gid *mgid, int set_qkey); | 551 | union ib_gid *mgid, int set_qkey); |
552 | int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast); | ||
553 | struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid); | ||
551 | 554 | ||
552 | int ipoib_init_qp(struct net_device *dev); | 555 | int ipoib_init_qp(struct net_device *dev); |
553 | int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); | 556 | int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 36536ce5a3e2..babba05d7a0e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) | |||
1149 | unsigned long dt; | 1149 | unsigned long dt; |
1150 | unsigned long flags; | 1150 | unsigned long flags; |
1151 | int i; | 1151 | int i; |
1152 | LIST_HEAD(remove_list); | ||
1153 | struct ipoib_mcast *mcast, *tmcast; | ||
1154 | struct net_device *dev = priv->dev; | ||
1152 | 1155 | ||
1153 | if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) | 1156 | if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) |
1154 | return; | 1157 | return; |
@@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) | |||
1176 | lockdep_is_held(&priv->lock))) != NULL) { | 1179 | lockdep_is_held(&priv->lock))) != NULL) { |
1177 | /* was the neigh idle for two GC periods */ | 1180 | /* was the neigh idle for two GC periods */ |
1178 | if (time_after(neigh_obsolete, neigh->alive)) { | 1181 | if (time_after(neigh_obsolete, neigh->alive)) { |
1182 | u8 *mgid = neigh->daddr + 4; | ||
1183 | |||
1184 | /* Is this multicast ? */ | ||
1185 | if (*mgid == 0xff) { | ||
1186 | mcast = __ipoib_mcast_find(dev, mgid); | ||
1187 | |||
1188 | if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { | ||
1189 | list_del(&mcast->list); | ||
1190 | rb_erase(&mcast->rb_node, &priv->multicast_tree); | ||
1191 | list_add_tail(&mcast->list, &remove_list); | ||
1192 | } | ||
1193 | } | ||
1194 | |||
1179 | rcu_assign_pointer(*np, | 1195 | rcu_assign_pointer(*np, |
1180 | rcu_dereference_protected(neigh->hnext, | 1196 | rcu_dereference_protected(neigh->hnext, |
1181 | lockdep_is_held(&priv->lock))); | 1197 | lockdep_is_held(&priv->lock))); |
@@ -1191,6 +1207,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) | |||
1191 | 1207 | ||
1192 | out_unlock: | 1208 | out_unlock: |
1193 | spin_unlock_irqrestore(&priv->lock, flags); | 1209 | spin_unlock_irqrestore(&priv->lock, flags); |
1210 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { | ||
1211 | ipoib_mcast_leave(dev, mcast); | ||
1212 | ipoib_mcast_free(mcast); | ||
1213 | } | ||
1194 | } | 1214 | } |
1195 | 1215 | ||
1196 | static void ipoib_reap_neigh(struct work_struct *work) | 1216 | static void ipoib_reap_neigh(struct work_struct *work) |
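The reaper hunk above leans on the IPoIB link-layer address layout: the 20-byte hardware address carries roughly a 4-byte QPN/flags field followed by a 16-byte GID, and a GID whose first byte is 0xff marks a multicast group. Only the daddr + 4 offset and the 0xff test come from the hunk itself; the rest of this minimal sketch, including every name, is illustrative.

#include <stdbool.h>
#include <stdint.h>

#define IPOIB_HW_ADDR_LEN 20

/* daddr is the 20-byte IPoIB hardware address of a stale neighbour */
static bool neigh_daddr_is_multicast(const uint8_t daddr[IPOIB_HW_ADDR_LEN])
{
        const uint8_t *mgid = daddr + 4;   /* skip the 4-byte QPN/flags field */

        return mgid[0] == 0xff;            /* multicast GIDs begin with 0xff */
}

Note that the actual leave and free happen on remove_list only after the spinlock is released, presumably because ipoib_mcast_leave() and ipoib_mcast_free() may block; only the tree and list manipulation runs under priv->lock.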
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 09a1748f9d13..d750a86042f3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -106,7 +106,7 @@ static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv, | |||
106 | queue_delayed_work(priv->wq, &priv->mcast_task, 0); | 106 | queue_delayed_work(priv->wq, &priv->mcast_task, 0); |
107 | } | 107 | } |
108 | 108 | ||
109 | static void ipoib_mcast_free(struct ipoib_mcast *mcast) | 109 | void ipoib_mcast_free(struct ipoib_mcast *mcast) |
110 | { | 110 | { |
111 | struct net_device *dev = mcast->dev; | 111 | struct net_device *dev = mcast->dev; |
112 | int tx_dropped = 0; | 112 | int tx_dropped = 0; |
@@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, | |||
153 | return mcast; | 153 | return mcast; |
154 | } | 154 | } |
155 | 155 | ||
156 | static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) | 156 | struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) |
157 | { | 157 | { |
158 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 158 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
159 | struct rb_node *n = priv->multicast_tree.rb_node; | 159 | struct rb_node *n = priv->multicast_tree.rb_node; |
@@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) | |||
508 | rec.hop_limit = priv->broadcast->mcmember.hop_limit; | 508 | rec.hop_limit = priv->broadcast->mcmember.hop_limit; |
509 | 509 | ||
510 | /* | 510 | /* |
511 | * Historically Linux IPoIB has never properly supported SEND | 511 | * Send-only IB Multicast joins do not work at the core |
512 | * ONLY join. It emulated it by not providing all the required | 512 | * IB layer yet, so we can't use them here. However, |
513 | * attributes, which is enough to prevent group creation and | 513 | * we are emulating an Ethernet multicast send, which |
514 | * detect if there are full members or not. A major problem | 514 | * does not require a multicast subscription and will |
515 | * with supporting SEND ONLY is detecting when the group is | 515 | * still send properly. The most appropriate thing to |
516 | * auto-destroyed as IPoIB will cache the MLID.. | 516 | * do is to create the group if it doesn't exist as that |
517 | * most closely emulates the behavior, from a user space | ||
518 | * application perspective, of Ethernet multicast | ||

519 | * operation. For now, we do a full join, maybe later | ||
520 | * when the core IB layers support send only joins we | ||
521 | * will use them. | ||
517 | */ | 522 | */ |
518 | #if 1 | 523 | #if 0 |
519 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) | ||
520 | comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; | ||
521 | #else | ||
522 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) | 524 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) |
523 | rec.join_state = 4; | 525 | rec.join_state = 4; |
524 | #endif | 526 | #endif |
@@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev) | |||
675 | return 0; | 677 | return 0; |
676 | } | 678 | } |
677 | 679 | ||
678 | static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) | 680 | int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) |
679 | { | 681 | { |
680 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 682 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
681 | int ret = 0; | 683 | int ret = 0; |
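For context on the now-disabled branch in the join path: in the SA MCMemberRecord, join_state is a bit mask, and the value 4 requested there selects the send-only non-member bit. The following sketch of that encoding uses names of my own, not the kernel headers, and reflects the usual description of the field rather than anything stated in the hunk.

/* JoinState bits of an SA MCMemberRecord, as usually described */
enum mcmember_join_state {
        JOIN_FULL_MEMBER         = 1 << 0,   /* 1 */
        JOIN_NON_MEMBER          = 1 << 1,   /* 2 */
        JOIN_SENDONLY_NON_MEMBER = 1 << 2,   /* 4: what the #if 0 branch asks for */
};

Until the core SA layer can issue that join type, the driver falls back to a full-member join, which also creates the group if it does not already exist.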
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 1ace5d83a4d7..f58ff96b6cbb 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS; | |||
97 | module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); | 97 | module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); |
98 | MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); | 98 | MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); |
99 | 99 | ||
100 | bool iser_always_reg = true; | ||
101 | module_param_named(always_register, iser_always_reg, bool, S_IRUGO); | ||
102 | MODULE_PARM_DESC(always_register, | ||
103 | "Always register memory, even for continuous memory regions (default:true)"); | ||
104 | |||
100 | bool iser_pi_enable = false; | 105 | bool iser_pi_enable = false; |
101 | module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); | 106 | module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); |
102 | MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); | 107 | MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 86f6583485ef..a5edd6ede692 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -611,6 +611,7 @@ extern int iser_debug_level; | |||
611 | extern bool iser_pi_enable; | 611 | extern bool iser_pi_enable; |
612 | extern int iser_pi_guard; | 612 | extern int iser_pi_guard; |
613 | extern unsigned int iser_max_sectors; | 613 | extern unsigned int iser_max_sectors; |
614 | extern bool iser_always_reg; | ||
614 | 615 | ||
615 | int iser_assign_reg_ops(struct iser_device *device); | 616 | int iser_assign_reg_ops(struct iser_device *device); |
616 | 617 | ||
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2493cc748db8..4c46d67d37a1 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
@@ -803,11 +803,12 @@ static int | |||
803 | iser_reg_prot_sg(struct iscsi_iser_task *task, | 803 | iser_reg_prot_sg(struct iscsi_iser_task *task, |
804 | struct iser_data_buf *mem, | 804 | struct iser_data_buf *mem, |
805 | struct iser_fr_desc *desc, | 805 | struct iser_fr_desc *desc, |
806 | bool use_dma_key, | ||
806 | struct iser_mem_reg *reg) | 807 | struct iser_mem_reg *reg) |
807 | { | 808 | { |
808 | struct iser_device *device = task->iser_conn->ib_conn.device; | 809 | struct iser_device *device = task->iser_conn->ib_conn.device; |
809 | 810 | ||
810 | if (mem->dma_nents == 1) | 811 | if (use_dma_key) |
811 | return iser_reg_dma(device, mem, reg); | 812 | return iser_reg_dma(device, mem, reg); |
812 | 813 | ||
813 | return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); | 814 | return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); |
@@ -817,11 +818,12 @@ static int | |||
817 | iser_reg_data_sg(struct iscsi_iser_task *task, | 818 | iser_reg_data_sg(struct iscsi_iser_task *task, |
818 | struct iser_data_buf *mem, | 819 | struct iser_data_buf *mem, |
819 | struct iser_fr_desc *desc, | 820 | struct iser_fr_desc *desc, |
821 | bool use_dma_key, | ||
820 | struct iser_mem_reg *reg) | 822 | struct iser_mem_reg *reg) |
821 | { | 823 | { |
822 | struct iser_device *device = task->iser_conn->ib_conn.device; | 824 | struct iser_device *device = task->iser_conn->ib_conn.device; |
823 | 825 | ||
824 | if (mem->dma_nents == 1) | 826 | if (use_dma_key) |
825 | return iser_reg_dma(device, mem, reg); | 827 | return iser_reg_dma(device, mem, reg); |
826 | 828 | ||
827 | return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); | 829 | return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); |
@@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, | |||
836 | struct iser_mem_reg *reg = &task->rdma_reg[dir]; | 838 | struct iser_mem_reg *reg = &task->rdma_reg[dir]; |
837 | struct iser_mem_reg *data_reg; | 839 | struct iser_mem_reg *data_reg; |
838 | struct iser_fr_desc *desc = NULL; | 840 | struct iser_fr_desc *desc = NULL; |
841 | bool use_dma_key; | ||
839 | int err; | 842 | int err; |
840 | 843 | ||
841 | err = iser_handle_unaligned_buf(task, mem, dir); | 844 | err = iser_handle_unaligned_buf(task, mem, dir); |
842 | if (unlikely(err)) | 845 | if (unlikely(err)) |
843 | return err; | 846 | return err; |
844 | 847 | ||
845 | if (mem->dma_nents != 1 || | 848 | use_dma_key = (mem->dma_nents == 1 && !iser_always_reg && |
846 | scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { | 849 | scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL); |
850 | |||
851 | if (!use_dma_key) { | ||
847 | desc = device->reg_ops->reg_desc_get(ib_conn); | 852 | desc = device->reg_ops->reg_desc_get(ib_conn); |
848 | reg->mem_h = desc; | 853 | reg->mem_h = desc; |
849 | } | 854 | } |
@@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, | |||
853 | else | 858 | else |
854 | data_reg = &task->desc.data_reg; | 859 | data_reg = &task->desc.data_reg; |
855 | 860 | ||
856 | err = iser_reg_data_sg(task, mem, desc, data_reg); | 861 | err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); |
857 | if (unlikely(err)) | 862 | if (unlikely(err)) |
858 | goto err_reg; | 863 | goto err_reg; |
859 | 864 | ||
@@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, | |||
866 | if (unlikely(err)) | 871 | if (unlikely(err)) |
867 | goto err_reg; | 872 | goto err_reg; |
868 | 873 | ||
869 | err = iser_reg_prot_sg(task, mem, desc, prot_reg); | 874 | err = iser_reg_prot_sg(task, mem, desc, |
875 | use_dma_key, prot_reg); | ||
870 | if (unlikely(err)) | 876 | if (unlikely(err)) |
871 | goto err_reg; | 877 | goto err_reg; |
872 | } | 878 | } |
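The new use_dma_key flag collapses the two earlier dma_nents checks into one decision made up front. Below is a simplified restatement of that predicate with the kernel types replaced by plain values; dma_nents, always_reg and pi_requested stand in for the mapped SG entry count, the iser_always_reg module parameter and the scsi_get_prot_op() != SCSI_PROT_NORMAL check.

#include <stdbool.h>

/* Use the device-wide DMA lkey only when the payload is a single flat
 * segment, the always_register knob is off, and no T10-PI protection is
 * requested (protection needs a registered, signature-capable MR).
 */
static bool iser_use_dma_key(int dma_nents, bool always_reg, bool pi_requested)
{
        return dma_nents == 1 && !always_reg && !pi_requested;
}

When the predicate is false, the task grabs a registration descriptor from reg_desc_get() exactly as before.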
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ae70cc1463ac..85132d867bc8 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
133 | (unsigned long)comp); | 133 | (unsigned long)comp); |
134 | } | 134 | } |
135 | 135 | ||
136 | device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | | 136 | if (!iser_always_reg) { |
137 | IB_ACCESS_REMOTE_WRITE | | 137 | int access = IB_ACCESS_LOCAL_WRITE | |
138 | IB_ACCESS_REMOTE_READ); | 138 | IB_ACCESS_REMOTE_WRITE | |
139 | if (IS_ERR(device->mr)) | 139 | IB_ACCESS_REMOTE_READ; |
140 | goto dma_mr_err; | 140 | |
141 | device->mr = ib_get_dma_mr(device->pd, access); | ||
142 | if (IS_ERR(device->mr)) | ||
143 | goto dma_mr_err; | ||
144 | } | ||
141 | 145 | ||
142 | INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, | 146 | INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, |
143 | iser_event_handler); | 147 | iser_event_handler); |
@@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
147 | return 0; | 151 | return 0; |
148 | 152 | ||
149 | handler_err: | 153 | handler_err: |
150 | ib_dereg_mr(device->mr); | 154 | if (device->mr) |
155 | ib_dereg_mr(device->mr); | ||
151 | dma_mr_err: | 156 | dma_mr_err: |
152 | for (i = 0; i < device->comps_used; i++) | 157 | for (i = 0; i < device->comps_used; i++) |
153 | tasklet_kill(&device->comps[i].tasklet); | 158 | tasklet_kill(&device->comps[i].tasklet); |
@@ -173,7 +178,6 @@ comps_err: | |||
173 | static void iser_free_device_ib_res(struct iser_device *device) | 178 | static void iser_free_device_ib_res(struct iser_device *device) |
174 | { | 179 | { |
175 | int i; | 180 | int i; |
176 | BUG_ON(device->mr == NULL); | ||
177 | 181 | ||
178 | for (i = 0; i < device->comps_used; i++) { | 182 | for (i = 0; i < device->comps_used; i++) { |
179 | struct iser_comp *comp = &device->comps[i]; | 183 | struct iser_comp *comp = &device->comps[i]; |
@@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device) | |||
184 | } | 188 | } |
185 | 189 | ||
186 | (void)ib_unregister_event_handler(&device->event_handler); | 190 | (void)ib_unregister_event_handler(&device->event_handler); |
187 | (void)ib_dereg_mr(device->mr); | 191 | if (device->mr) |
192 | (void)ib_dereg_mr(device->mr); | ||
188 | ib_dealloc_pd(device->pd); | 193 | ib_dealloc_pd(device->pd); |
189 | 194 | ||
190 | kfree(device->comps); | 195 | kfree(device->comps); |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 403bd29443b8..aa59037d7504 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -238,8 +238,6 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) | |||
238 | rx_sg->lkey = device->pd->local_dma_lkey; | 238 | rx_sg->lkey = device->pd->local_dma_lkey; |
239 | } | 239 | } |
240 | 240 | ||
241 | isert_conn->rx_desc_head = 0; | ||
242 | |||
243 | return 0; | 241 | return 0; |
244 | 242 | ||
245 | dma_map_fail: | 243 | dma_map_fail: |
@@ -634,7 +632,7 @@ static void | |||
634 | isert_init_conn(struct isert_conn *isert_conn) | 632 | isert_init_conn(struct isert_conn *isert_conn) |
635 | { | 633 | { |
636 | isert_conn->state = ISER_CONN_INIT; | 634 | isert_conn->state = ISER_CONN_INIT; |
637 | INIT_LIST_HEAD(&isert_conn->accept_node); | 635 | INIT_LIST_HEAD(&isert_conn->node); |
638 | init_completion(&isert_conn->login_comp); | 636 | init_completion(&isert_conn->login_comp); |
639 | init_completion(&isert_conn->login_req_comp); | 637 | init_completion(&isert_conn->login_req_comp); |
640 | init_completion(&isert_conn->wait); | 638 | init_completion(&isert_conn->wait); |
@@ -762,28 +760,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
762 | ret = isert_rdma_post_recvl(isert_conn); | 760 | ret = isert_rdma_post_recvl(isert_conn); |
763 | if (ret) | 761 | if (ret) |
764 | goto out_conn_dev; | 762 | goto out_conn_dev; |
765 | /* | ||
766 | * Obtain the second reference now before isert_rdma_accept() to | ||
767 | * ensure that any initiator generated REJECT CM event that occurs | ||
768 | * asynchronously won't drop the last reference until the error path | ||
769 | * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() -> | ||
770 | * isert_free_conn() -> isert_put_conn() -> kref_put(). | ||
771 | */ | ||
772 | if (!kref_get_unless_zero(&isert_conn->kref)) { | ||
773 | isert_warn("conn %p connect_release is running\n", isert_conn); | ||
774 | goto out_conn_dev; | ||
775 | } | ||
776 | 763 | ||
777 | ret = isert_rdma_accept(isert_conn); | 764 | ret = isert_rdma_accept(isert_conn); |
778 | if (ret) | 765 | if (ret) |
779 | goto out_conn_dev; | 766 | goto out_conn_dev; |
780 | 767 | ||
781 | mutex_lock(&isert_np->np_accept_mutex); | 768 | mutex_lock(&isert_np->mutex); |
782 | list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list); | 769 | list_add_tail(&isert_conn->node, &isert_np->accepted); |
783 | mutex_unlock(&isert_np->np_accept_mutex); | 770 | mutex_unlock(&isert_np->mutex); |
784 | 771 | ||
785 | isert_info("np %p: Allow accept_np to continue\n", np); | ||
786 | up(&isert_np->np_sem); | ||
787 | return 0; | 772 | return 0; |
788 | 773 | ||
789 | out_conn_dev: | 774 | out_conn_dev: |
@@ -831,13 +816,21 @@ static void | |||
831 | isert_connected_handler(struct rdma_cm_id *cma_id) | 816 | isert_connected_handler(struct rdma_cm_id *cma_id) |
832 | { | 817 | { |
833 | struct isert_conn *isert_conn = cma_id->qp->qp_context; | 818 | struct isert_conn *isert_conn = cma_id->qp->qp_context; |
819 | struct isert_np *isert_np = cma_id->context; | ||
834 | 820 | ||
835 | isert_info("conn %p\n", isert_conn); | 821 | isert_info("conn %p\n", isert_conn); |
836 | 822 | ||
837 | mutex_lock(&isert_conn->mutex); | 823 | mutex_lock(&isert_conn->mutex); |
838 | if (isert_conn->state != ISER_CONN_FULL_FEATURE) | 824 | isert_conn->state = ISER_CONN_UP; |
839 | isert_conn->state = ISER_CONN_UP; | 825 | kref_get(&isert_conn->kref); |
840 | mutex_unlock(&isert_conn->mutex); | 826 | mutex_unlock(&isert_conn->mutex); |
827 | |||
828 | mutex_lock(&isert_np->mutex); | ||
829 | list_move_tail(&isert_conn->node, &isert_np->pending); | ||
830 | mutex_unlock(&isert_np->mutex); | ||
831 | |||
832 | isert_info("np %p: Allow accept_np to continue\n", isert_np); | ||
833 | up(&isert_np->sem); | ||
841 | } | 834 | } |
842 | 835 | ||
843 | static void | 836 | static void |
@@ -903,14 +896,14 @@ isert_np_cma_handler(struct isert_np *isert_np, | |||
903 | 896 | ||
904 | switch (event) { | 897 | switch (event) { |
905 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | 898 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
906 | isert_np->np_cm_id = NULL; | 899 | isert_np->cm_id = NULL; |
907 | break; | 900 | break; |
908 | case RDMA_CM_EVENT_ADDR_CHANGE: | 901 | case RDMA_CM_EVENT_ADDR_CHANGE: |
909 | isert_np->np_cm_id = isert_setup_id(isert_np); | 902 | isert_np->cm_id = isert_setup_id(isert_np); |
910 | if (IS_ERR(isert_np->np_cm_id)) { | 903 | if (IS_ERR(isert_np->cm_id)) { |
911 | isert_err("isert np %p setup id failed: %ld\n", | 904 | isert_err("isert np %p setup id failed: %ld\n", |
912 | isert_np, PTR_ERR(isert_np->np_cm_id)); | 905 | isert_np, PTR_ERR(isert_np->cm_id)); |
913 | isert_np->np_cm_id = NULL; | 906 | isert_np->cm_id = NULL; |
914 | } | 907 | } |
915 | break; | 908 | break; |
916 | default: | 909 | default: |
@@ -929,7 +922,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, | |||
929 | struct isert_conn *isert_conn; | 922 | struct isert_conn *isert_conn; |
930 | bool terminating = false; | 923 | bool terminating = false; |
931 | 924 | ||
932 | if (isert_np->np_cm_id == cma_id) | 925 | if (isert_np->cm_id == cma_id) |
933 | return isert_np_cma_handler(cma_id->context, event); | 926 | return isert_np_cma_handler(cma_id->context, event); |
934 | 927 | ||
935 | isert_conn = cma_id->qp->qp_context; | 928 | isert_conn = cma_id->qp->qp_context; |
@@ -945,13 +938,13 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, | |||
945 | if (terminating) | 938 | if (terminating) |
946 | goto out; | 939 | goto out; |
947 | 940 | ||
948 | mutex_lock(&isert_np->np_accept_mutex); | 941 | mutex_lock(&isert_np->mutex); |
949 | if (!list_empty(&isert_conn->accept_node)) { | 942 | if (!list_empty(&isert_conn->node)) { |
950 | list_del_init(&isert_conn->accept_node); | 943 | list_del_init(&isert_conn->node); |
951 | isert_put_conn(isert_conn); | 944 | isert_put_conn(isert_conn); |
952 | queue_work(isert_release_wq, &isert_conn->release_work); | 945 | queue_work(isert_release_wq, &isert_conn->release_work); |
953 | } | 946 | } |
954 | mutex_unlock(&isert_np->np_accept_mutex); | 947 | mutex_unlock(&isert_np->mutex); |
955 | 948 | ||
956 | out: | 949 | out: |
957 | return 0; | 950 | return 0; |
@@ -962,6 +955,7 @@ isert_connect_error(struct rdma_cm_id *cma_id) | |||
962 | { | 955 | { |
963 | struct isert_conn *isert_conn = cma_id->qp->qp_context; | 956 | struct isert_conn *isert_conn = cma_id->qp->qp_context; |
964 | 957 | ||
958 | list_del_init(&isert_conn->node); | ||
965 | isert_conn->cm_id = NULL; | 959 | isert_conn->cm_id = NULL; |
966 | isert_put_conn(isert_conn); | 960 | isert_put_conn(isert_conn); |
967 | 961 | ||
@@ -1006,35 +1000,51 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
1006 | } | 1000 | } |
1007 | 1001 | ||
1008 | static int | 1002 | static int |
1009 | isert_post_recv(struct isert_conn *isert_conn, u32 count) | 1003 | isert_post_recvm(struct isert_conn *isert_conn, u32 count) |
1010 | { | 1004 | { |
1011 | struct ib_recv_wr *rx_wr, *rx_wr_failed; | 1005 | struct ib_recv_wr *rx_wr, *rx_wr_failed; |
1012 | int i, ret; | 1006 | int i, ret; |
1013 | unsigned int rx_head = isert_conn->rx_desc_head; | ||
1014 | struct iser_rx_desc *rx_desc; | 1007 | struct iser_rx_desc *rx_desc; |
1015 | 1008 | ||
1016 | for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { | 1009 | for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { |
1017 | rx_desc = &isert_conn->rx_descs[rx_head]; | 1010 | rx_desc = &isert_conn->rx_descs[i]; |
1018 | rx_wr->wr_id = (uintptr_t)rx_desc; | 1011 | rx_wr->wr_id = (uintptr_t)rx_desc; |
1019 | rx_wr->sg_list = &rx_desc->rx_sg; | 1012 | rx_wr->sg_list = &rx_desc->rx_sg; |
1020 | rx_wr->num_sge = 1; | 1013 | rx_wr->num_sge = 1; |
1021 | rx_wr->next = rx_wr + 1; | 1014 | rx_wr->next = rx_wr + 1; |
1022 | rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1); | ||
1023 | } | 1015 | } |
1024 | |||
1025 | rx_wr--; | 1016 | rx_wr--; |
1026 | rx_wr->next = NULL; /* mark end of work requests list */ | 1017 | rx_wr->next = NULL; /* mark end of work requests list */ |
1027 | 1018 | ||
1028 | isert_conn->post_recv_buf_count += count; | 1019 | isert_conn->post_recv_buf_count += count; |
1029 | ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, | 1020 | ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, |
1030 | &rx_wr_failed); | 1021 | &rx_wr_failed); |
1031 | if (ret) { | 1022 | if (ret) { |
1032 | isert_err("ib_post_recv() failed with ret: %d\n", ret); | 1023 | isert_err("ib_post_recv() failed with ret: %d\n", ret); |
1033 | isert_conn->post_recv_buf_count -= count; | 1024 | isert_conn->post_recv_buf_count -= count; |
1034 | } else { | ||
1035 | isert_dbg("Posted %d RX buffers\n", count); | ||
1036 | isert_conn->rx_desc_head = rx_head; | ||
1037 | } | 1025 | } |
1026 | |||
1027 | return ret; | ||
1028 | } | ||
1029 | |||
1030 | static int | ||
1031 | isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) | ||
1032 | { | ||
1033 | struct ib_recv_wr *rx_wr_failed, rx_wr; | ||
1034 | int ret; | ||
1035 | |||
1036 | rx_wr.wr_id = (uintptr_t)rx_desc; | ||
1037 | rx_wr.sg_list = &rx_desc->rx_sg; | ||
1038 | rx_wr.num_sge = 1; | ||
1039 | rx_wr.next = NULL; | ||
1040 | |||
1041 | isert_conn->post_recv_buf_count++; | ||
1042 | ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed); | ||
1043 | if (ret) { | ||
1044 | isert_err("ib_post_recv() failed with ret: %d\n", ret); | ||
1045 | isert_conn->post_recv_buf_count--; | ||
1046 | } | ||
1047 | |||
1038 | return ret; | 1048 | return ret; |
1039 | } | 1049 | } |
1040 | 1050 | ||
@@ -1205,7 +1215,8 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | |||
1205 | if (ret) | 1215 | if (ret) |
1206 | return ret; | 1216 | return ret; |
1207 | 1217 | ||
1208 | ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX); | 1218 | ret = isert_post_recvm(isert_conn, |
1219 | ISERT_QP_MAX_RECV_DTOS); | ||
1209 | if (ret) | 1220 | if (ret) |
1210 | return ret; | 1221 | return ret; |
1211 | 1222 | ||
@@ -1278,7 +1289,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) | |||
1278 | } | 1289 | } |
1279 | 1290 | ||
1280 | static struct iscsi_cmd | 1291 | static struct iscsi_cmd |
1281 | *isert_allocate_cmd(struct iscsi_conn *conn) | 1292 | *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) |
1282 | { | 1293 | { |
1283 | struct isert_conn *isert_conn = conn->context; | 1294 | struct isert_conn *isert_conn = conn->context; |
1284 | struct isert_cmd *isert_cmd; | 1295 | struct isert_cmd *isert_cmd; |
@@ -1292,6 +1303,7 @@ static struct iscsi_cmd | |||
1292 | isert_cmd = iscsit_priv_cmd(cmd); | 1303 | isert_cmd = iscsit_priv_cmd(cmd); |
1293 | isert_cmd->conn = isert_conn; | 1304 | isert_cmd->conn = isert_conn; |
1294 | isert_cmd->iscsi_cmd = cmd; | 1305 | isert_cmd->iscsi_cmd = cmd; |
1306 | isert_cmd->rx_desc = rx_desc; | ||
1295 | 1307 | ||
1296 | return cmd; | 1308 | return cmd; |
1297 | } | 1309 | } |
@@ -1303,9 +1315,9 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, | |||
1303 | { | 1315 | { |
1304 | struct iscsi_conn *conn = isert_conn->conn; | 1316 | struct iscsi_conn *conn = isert_conn->conn; |
1305 | struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; | 1317 | struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; |
1306 | struct scatterlist *sg; | ||
1307 | int imm_data, imm_data_len, unsol_data, sg_nents, rc; | 1318 | int imm_data, imm_data_len, unsol_data, sg_nents, rc; |
1308 | bool dump_payload = false; | 1319 | bool dump_payload = false; |
1320 | unsigned int data_len; | ||
1309 | 1321 | ||
1310 | rc = iscsit_setup_scsi_cmd(conn, cmd, buf); | 1322 | rc = iscsit_setup_scsi_cmd(conn, cmd, buf); |
1311 | if (rc < 0) | 1323 | if (rc < 0) |
@@ -1314,7 +1326,10 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, | |||
1314 | imm_data = cmd->immediate_data; | 1326 | imm_data = cmd->immediate_data; |
1315 | imm_data_len = cmd->first_burst_len; | 1327 | imm_data_len = cmd->first_burst_len; |
1316 | unsol_data = cmd->unsolicited_data; | 1328 | unsol_data = cmd->unsolicited_data; |
1329 | data_len = cmd->se_cmd.data_length; | ||
1317 | 1330 | ||
1331 | if (imm_data && imm_data_len == data_len) | ||
1332 | cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | ||
1318 | rc = iscsit_process_scsi_cmd(conn, cmd, hdr); | 1333 | rc = iscsit_process_scsi_cmd(conn, cmd, hdr); |
1319 | if (rc < 0) { | 1334 | if (rc < 0) { |
1320 | return 0; | 1335 | return 0; |
@@ -1326,13 +1341,20 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, | |||
1326 | if (!imm_data) | 1341 | if (!imm_data) |
1327 | return 0; | 1342 | return 0; |
1328 | 1343 | ||
1329 | sg = &cmd->se_cmd.t_data_sg[0]; | 1344 | if (imm_data_len != data_len) { |
1330 | sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); | 1345 | sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); |
1331 | 1346 | sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, | |
1332 | isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", | 1347 | &rx_desc->data[0], imm_data_len); |
1333 | sg, sg_nents, &rx_desc->data[0], imm_data_len); | 1348 | isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", |
1334 | 1349 | sg_nents, imm_data_len); | |
1335 | sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); | 1350 | } else { |
1351 | sg_init_table(&isert_cmd->sg, 1); | ||
1352 | cmd->se_cmd.t_data_sg = &isert_cmd->sg; | ||
1353 | cmd->se_cmd.t_data_nents = 1; | ||
1354 | sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); | ||
1355 | isert_dbg("Transfer Immediate imm_data_len: %d\n", | ||
1356 | imm_data_len); | ||
1357 | } | ||
1336 | 1358 | ||
1337 | cmd->write_data_done += imm_data_len; | 1359 | cmd->write_data_done += imm_data_len; |
1338 | 1360 | ||
@@ -1407,6 +1429,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, | |||
1407 | if (rc < 0) | 1429 | if (rc < 0) |
1408 | return rc; | 1430 | return rc; |
1409 | 1431 | ||
1432 | /* | ||
1433 | * multiple data-outs on the same command can arrive - | ||
1434 | * so post the buffer beforehand | ||
1435 | */ | ||
1436 | rc = isert_post_recv(isert_conn, rx_desc); | ||
1437 | if (rc) { | ||
1438 | isert_err("ib_post_recv failed with %d\n", rc); | ||
1439 | return rc; | ||
1440 | } | ||
1410 | return 0; | 1441 | return 0; |
1411 | } | 1442 | } |
1412 | 1443 | ||
@@ -1479,7 +1510,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1479 | 1510 | ||
1480 | switch (opcode) { | 1511 | switch (opcode) { |
1481 | case ISCSI_OP_SCSI_CMD: | 1512 | case ISCSI_OP_SCSI_CMD: |
1482 | cmd = isert_allocate_cmd(conn); | 1513 | cmd = isert_allocate_cmd(conn, rx_desc); |
1483 | if (!cmd) | 1514 | if (!cmd) |
1484 | break; | 1515 | break; |
1485 | 1516 | ||
@@ -1493,7 +1524,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1493 | rx_desc, (unsigned char *)hdr); | 1524 | rx_desc, (unsigned char *)hdr); |
1494 | break; | 1525 | break; |
1495 | case ISCSI_OP_NOOP_OUT: | 1526 | case ISCSI_OP_NOOP_OUT: |
1496 | cmd = isert_allocate_cmd(conn); | 1527 | cmd = isert_allocate_cmd(conn, rx_desc); |
1497 | if (!cmd) | 1528 | if (!cmd) |
1498 | break; | 1529 | break; |
1499 | 1530 | ||
@@ -1506,7 +1537,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1506 | (unsigned char *)hdr); | 1537 | (unsigned char *)hdr); |
1507 | break; | 1538 | break; |
1508 | case ISCSI_OP_SCSI_TMFUNC: | 1539 | case ISCSI_OP_SCSI_TMFUNC: |
1509 | cmd = isert_allocate_cmd(conn); | 1540 | cmd = isert_allocate_cmd(conn, rx_desc); |
1510 | if (!cmd) | 1541 | if (!cmd) |
1511 | break; | 1542 | break; |
1512 | 1543 | ||
@@ -1514,22 +1545,20 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1514 | (unsigned char *)hdr); | 1545 | (unsigned char *)hdr); |
1515 | break; | 1546 | break; |
1516 | case ISCSI_OP_LOGOUT: | 1547 | case ISCSI_OP_LOGOUT: |
1517 | cmd = isert_allocate_cmd(conn); | 1548 | cmd = isert_allocate_cmd(conn, rx_desc); |
1518 | if (!cmd) | 1549 | if (!cmd) |
1519 | break; | 1550 | break; |
1520 | 1551 | ||
1521 | ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); | 1552 | ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); |
1522 | break; | 1553 | break; |
1523 | case ISCSI_OP_TEXT: | 1554 | case ISCSI_OP_TEXT: |
1524 | if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) { | 1555 | if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) |
1525 | cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); | 1556 | cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); |
1526 | if (!cmd) | 1557 | else |
1527 | break; | 1558 | cmd = isert_allocate_cmd(conn, rx_desc); |
1528 | } else { | 1559 | |
1529 | cmd = isert_allocate_cmd(conn); | 1560 | if (!cmd) |
1530 | if (!cmd) | 1561 | break; |
1531 | break; | ||
1532 | } | ||
1533 | 1562 | ||
1534 | isert_cmd = iscsit_priv_cmd(cmd); | 1563 | isert_cmd = iscsit_priv_cmd(cmd); |
1535 | ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, | 1564 | ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, |
@@ -1589,7 +1618,7 @@ isert_rcv_completion(struct iser_rx_desc *desc, | |||
1589 | struct ib_device *ib_dev = isert_conn->cm_id->device; | 1618 | struct ib_device *ib_dev = isert_conn->cm_id->device; |
1590 | struct iscsi_hdr *hdr; | 1619 | struct iscsi_hdr *hdr; |
1591 | u64 rx_dma; | 1620 | u64 rx_dma; |
1592 | int rx_buflen, outstanding; | 1621 | int rx_buflen; |
1593 | 1622 | ||
1594 | if ((char *)desc == isert_conn->login_req_buf) { | 1623 | if ((char *)desc == isert_conn->login_req_buf) { |
1595 | rx_dma = isert_conn->login_req_dma; | 1624 | rx_dma = isert_conn->login_req_dma; |
@@ -1629,22 +1658,6 @@ isert_rcv_completion(struct iser_rx_desc *desc, | |||
1629 | DMA_FROM_DEVICE); | 1658 | DMA_FROM_DEVICE); |
1630 | 1659 | ||
1631 | isert_conn->post_recv_buf_count--; | 1660 | isert_conn->post_recv_buf_count--; |
1632 | isert_dbg("Decremented post_recv_buf_count: %d\n", | ||
1633 | isert_conn->post_recv_buf_count); | ||
1634 | |||
1635 | if ((char *)desc == isert_conn->login_req_buf) | ||
1636 | return; | ||
1637 | |||
1638 | outstanding = isert_conn->post_recv_buf_count; | ||
1639 | if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) { | ||
1640 | int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding, | ||
1641 | ISERT_MIN_POSTED_RX); | ||
1642 | err = isert_post_recv(isert_conn, count); | ||
1643 | if (err) { | ||
1644 | isert_err("isert_post_recv() count: %d failed, %d\n", | ||
1645 | count, err); | ||
1646 | } | ||
1647 | } | ||
1648 | } | 1661 | } |
1649 | 1662 | ||
1650 | static int | 1663 | static int |
@@ -2156,6 +2169,12 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) | |||
2156 | struct ib_send_wr *wr_failed; | 2169 | struct ib_send_wr *wr_failed; |
2157 | int ret; | 2170 | int ret; |
2158 | 2171 | ||
2172 | ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); | ||
2173 | if (ret) { | ||
2174 | isert_err("ib_post_recv failed with %d\n", ret); | ||
2175 | return ret; | ||
2176 | } | ||
2177 | |||
2159 | ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, | 2178 | ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, |
2160 | &wr_failed); | 2179 | &wr_failed); |
2161 | if (ret) { | 2180 | if (ret) { |
@@ -2950,6 +2969,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2950 | &isert_cmd->tx_desc.send_wr); | 2969 | &isert_cmd->tx_desc.send_wr); |
2951 | isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; | 2970 | isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; |
2952 | wr->send_wr_num += 1; | 2971 | wr->send_wr_num += 1; |
2972 | |||
2973 | rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); | ||
2974 | if (rc) { | ||
2975 | isert_err("ib_post_recv failed with %d\n", rc); | ||
2976 | return rc; | ||
2977 | } | ||
2953 | } | 2978 | } |
2954 | 2979 | ||
2955 | rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); | 2980 | rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); |
@@ -2999,9 +3024,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) | |||
2999 | static int | 3024 | static int |
3000 | isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | 3025 | isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) |
3001 | { | 3026 | { |
3002 | int ret; | 3027 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
3028 | int ret = 0; | ||
3003 | 3029 | ||
3004 | switch (state) { | 3030 | switch (state) { |
3031 | case ISTATE_REMOVE: | ||
3032 | spin_lock_bh(&conn->cmd_lock); | ||
3033 | list_del_init(&cmd->i_conn_node); | ||
3034 | spin_unlock_bh(&conn->cmd_lock); | ||
3035 | isert_put_cmd(isert_cmd, true); | ||
3036 | break; | ||
3005 | case ISTATE_SEND_NOPIN_WANT_RESPONSE: | 3037 | case ISTATE_SEND_NOPIN_WANT_RESPONSE: |
3006 | ret = isert_put_nopin(cmd, conn, false); | 3038 | ret = isert_put_nopin(cmd, conn, false); |
3007 | break; | 3039 | break; |
@@ -3106,10 +3138,10 @@ isert_setup_np(struct iscsi_np *np, | |||
3106 | isert_err("Unable to allocate struct isert_np\n"); | 3138 | isert_err("Unable to allocate struct isert_np\n"); |
3107 | return -ENOMEM; | 3139 | return -ENOMEM; |
3108 | } | 3140 | } |
3109 | sema_init(&isert_np->np_sem, 0); | 3141 | sema_init(&isert_np->sem, 0); |
3110 | mutex_init(&isert_np->np_accept_mutex); | 3142 | mutex_init(&isert_np->mutex); |
3111 | INIT_LIST_HEAD(&isert_np->np_accept_list); | 3143 | INIT_LIST_HEAD(&isert_np->accepted); |
3112 | init_completion(&isert_np->np_login_comp); | 3144 | INIT_LIST_HEAD(&isert_np->pending); |
3113 | isert_np->np = np; | 3145 | isert_np->np = np; |
3114 | 3146 | ||
3115 | /* | 3147 | /* |
@@ -3125,7 +3157,7 @@ isert_setup_np(struct iscsi_np *np, | |||
3125 | goto out; | 3157 | goto out; |
3126 | } | 3158 | } |
3127 | 3159 | ||
3128 | isert_np->np_cm_id = isert_lid; | 3160 | isert_np->cm_id = isert_lid; |
3129 | np->np_context = isert_np; | 3161 | np->np_context = isert_np; |
3130 | 3162 | ||
3131 | return 0; | 3163 | return 0; |
@@ -3214,7 +3246,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) | |||
3214 | int ret; | 3246 | int ret; |
3215 | 3247 | ||
3216 | accept_wait: | 3248 | accept_wait: |
3217 | ret = down_interruptible(&isert_np->np_sem); | 3249 | ret = down_interruptible(&isert_np->sem); |
3218 | if (ret) | 3250 | if (ret) |
3219 | return -ENODEV; | 3251 | return -ENODEV; |
3220 | 3252 | ||
@@ -3231,15 +3263,15 @@ accept_wait: | |||
3231 | } | 3263 | } |
3232 | spin_unlock_bh(&np->np_thread_lock); | 3264 | spin_unlock_bh(&np->np_thread_lock); |
3233 | 3265 | ||
3234 | mutex_lock(&isert_np->np_accept_mutex); | 3266 | mutex_lock(&isert_np->mutex); |
3235 | if (list_empty(&isert_np->np_accept_list)) { | 3267 | if (list_empty(&isert_np->pending)) { |
3236 | mutex_unlock(&isert_np->np_accept_mutex); | 3268 | mutex_unlock(&isert_np->mutex); |
3237 | goto accept_wait; | 3269 | goto accept_wait; |
3238 | } | 3270 | } |
3239 | isert_conn = list_first_entry(&isert_np->np_accept_list, | 3271 | isert_conn = list_first_entry(&isert_np->pending, |
3240 | struct isert_conn, accept_node); | 3272 | struct isert_conn, node); |
3241 | list_del_init(&isert_conn->accept_node); | 3273 | list_del_init(&isert_conn->node); |
3242 | mutex_unlock(&isert_np->np_accept_mutex); | 3274 | mutex_unlock(&isert_np->mutex); |
3243 | 3275 | ||
3244 | conn->context = isert_conn; | 3276 | conn->context = isert_conn; |
3245 | isert_conn->conn = conn; | 3277 | isert_conn->conn = conn; |
@@ -3257,28 +3289,39 @@ isert_free_np(struct iscsi_np *np) | |||
3257 | struct isert_np *isert_np = np->np_context; | 3289 | struct isert_np *isert_np = np->np_context; |
3258 | struct isert_conn *isert_conn, *n; | 3290 | struct isert_conn *isert_conn, *n; |
3259 | 3291 | ||
3260 | if (isert_np->np_cm_id) | 3292 | if (isert_np->cm_id) |
3261 | rdma_destroy_id(isert_np->np_cm_id); | 3293 | rdma_destroy_id(isert_np->cm_id); |
3262 | 3294 | ||
3263 | /* | 3295 | /* |
3264 | * FIXME: At this point we don't have a good way to ensure | 3296 | * FIXME: At this point we don't have a good way to ensure |
3265 | * that at this point we don't have hanging connections that | 3297 | * that at this point we don't have hanging connections that |
3266 | * completed RDMA establishment but didn't start iscsi login | 3298 | * completed RDMA establishment but didn't start iscsi login |
3267 | * process. So work around this by cleaning up whatever piled | 3299 | * process. So work around this by cleaning up whatever piled |
3268 | * up in np_accept_list. | 3300 | * up in accepted and pending lists. |
3269 | */ | 3301 | */ |
3270 | mutex_lock(&isert_np->np_accept_mutex); | 3302 | mutex_lock(&isert_np->mutex); |
3271 | if (!list_empty(&isert_np->np_accept_list)) { | 3303 | if (!list_empty(&isert_np->pending)) { |
3272 | isert_info("Still have isert connections, cleaning up...\n"); | 3304 | isert_info("Still have isert pending connections\n"); |
3305 | list_for_each_entry_safe(isert_conn, n, | ||
3306 | &isert_np->pending, | ||
3307 | node) { | ||
3308 | isert_info("cleaning isert_conn %p state (%d)\n", | ||
3309 | isert_conn, isert_conn->state); | ||
3310 | isert_connect_release(isert_conn); | ||
3311 | } | ||
3312 | } | ||
3313 | |||
3314 | if (!list_empty(&isert_np->accepted)) { | ||
3315 | isert_info("Still have isert accepted connections\n"); | ||
3273 | list_for_each_entry_safe(isert_conn, n, | 3316 | list_for_each_entry_safe(isert_conn, n, |
3274 | &isert_np->np_accept_list, | 3317 | &isert_np->accepted, |
3275 | accept_node) { | 3318 | node) { |
3276 | isert_info("cleaning isert_conn %p state (%d)\n", | 3319 | isert_info("cleaning isert_conn %p state (%d)\n", |
3277 | isert_conn, isert_conn->state); | 3320 | isert_conn, isert_conn->state); |
3278 | isert_connect_release(isert_conn); | 3321 | isert_connect_release(isert_conn); |
3279 | } | 3322 | } |
3280 | } | 3323 | } |
3281 | mutex_unlock(&isert_np->np_accept_mutex); | 3324 | mutex_unlock(&isert_np->mutex); |
3282 | 3325 | ||
3283 | np->np_context = NULL; | 3326 | np->np_context = NULL; |
3284 | kfree(isert_np); | 3327 | kfree(isert_np); |
@@ -3345,6 +3388,41 @@ isert_wait4flush(struct isert_conn *isert_conn) | |||
3345 | wait_for_completion(&isert_conn->wait_comp_err); | 3388 | wait_for_completion(&isert_conn->wait_comp_err); |
3346 | } | 3389 | } |
3347 | 3390 | ||
3391 | /** | ||
3392 | * isert_put_unsol_pending_cmds() - Drop commands waiting for | ||
3393 | * unsolicited data-out | ||
3394 | * @conn: iscsi connection | ||
3395 | * | ||
3396 | * We might still have commands that are waiting for unsolicited | ||
3397 | * data-out messages. We must put the extra reference on those | ||
3398 | * before blocking on the target_wait_for_session_cmds | ||
3399 | */ | ||
3400 | static void | ||
3401 | isert_put_unsol_pending_cmds(struct iscsi_conn *conn) | ||
3402 | { | ||
3403 | struct iscsi_cmd *cmd, *tmp; | ||
3404 | static LIST_HEAD(drop_cmd_list); | ||
3405 | |||
3406 | spin_lock_bh(&conn->cmd_lock); | ||
3407 | list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { | ||
3408 | if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && | ||
3409 | (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && | ||
3410 | (cmd->write_data_done < cmd->se_cmd.data_length)) | ||
3411 | list_move_tail(&cmd->i_conn_node, &drop_cmd_list); | ||
3412 | } | ||
3413 | spin_unlock_bh(&conn->cmd_lock); | ||
3414 | |||
3415 | list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { | ||
3416 | list_del_init(&cmd->i_conn_node); | ||
3417 | if (cmd->i_state != ISTATE_REMOVE) { | ||
3418 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | ||
3419 | |||
3420 | isert_info("conn %p dropping cmd %p\n", conn, cmd); | ||
3421 | isert_put_cmd(isert_cmd, true); | ||
3422 | } | ||
3423 | } | ||
3424 | } | ||
3425 | |||
3348 | static void isert_wait_conn(struct iscsi_conn *conn) | 3426 | static void isert_wait_conn(struct iscsi_conn *conn) |
3349 | { | 3427 | { |
3350 | struct isert_conn *isert_conn = conn->context; | 3428 | struct isert_conn *isert_conn = conn->context; |
@@ -3363,8 +3441,9 @@ static void isert_wait_conn(struct iscsi_conn *conn) | |||
3363 | isert_conn_terminate(isert_conn); | 3441 | isert_conn_terminate(isert_conn); |
3364 | mutex_unlock(&isert_conn->mutex); | 3442 | mutex_unlock(&isert_conn->mutex); |
3365 | 3443 | ||
3366 | isert_wait4cmds(conn); | ||
3367 | isert_wait4flush(isert_conn); | 3444 | isert_wait4flush(isert_conn); |
3445 | isert_put_unsol_pending_cmds(conn); | ||
3446 | isert_wait4cmds(conn); | ||
3368 | isert_wait4logout(isert_conn); | 3447 | isert_wait4logout(isert_conn); |
3369 | 3448 | ||
3370 | queue_work(isert_release_wq, &isert_conn->release_work); | 3449 | queue_work(isert_release_wq, &isert_conn->release_work); |
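Taken together, the ib_isert receive-path changes drop the rolling rx_desc_head ring: every descriptor is posted once via isert_post_recvm(isert_conn, ISERT_QP_MAX_RECV_DTOS) after login, and each descriptor is then re-posted individually with isert_post_recv() right before the response or data-out handling that retires it. A toy sketch of that strategy follows; all names and the ring size are invented stand-ins, not the driver's code.

#include <stdio.h>

#define RING_SIZE 8                       /* stand-in for ISERT_QP_MAX_RECV_DTOS */

struct rx_desc { int id; };

static struct rx_desc ring[RING_SIZE];

static int post_recv(struct rx_desc *d)   /* stand-in for ib_post_recv() */
{
        printf("posted rx descriptor %d\n", d->id);
        return 0;
}

/* prime the whole receive queue once, right after login setup */
static int prime_recv_queue(void)
{
        for (int i = 0; i < RING_SIZE; i++) {
                ring[i].id = i;
                if (post_recv(&ring[i]))
                        return -1;
        }
        return 0;
}

/* recycle the consumed buffer before queueing the reply that retires it,
 * so the receive queue can never run dry while replies are outstanding
 */
static int send_response(struct rx_desc *consumed)
{
        if (post_recv(consumed))
                return -1;
        /* ... post the actual send here ... */
        return 0;
}

int main(void)
{
        if (prime_recv_queue())
                return 1;
        return send_response(&ring[0]);
}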
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 6a04ba3c0f72..c5b99bcecbcf 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
@@ -113,7 +113,6 @@ enum { | |||
113 | }; | 113 | }; |
114 | 114 | ||
115 | struct isert_rdma_wr { | 115 | struct isert_rdma_wr { |
116 | struct list_head wr_list; | ||
117 | struct isert_cmd *isert_cmd; | 116 | struct isert_cmd *isert_cmd; |
118 | enum iser_ib_op_code iser_ib_op; | 117 | enum iser_ib_op_code iser_ib_op; |
119 | struct ib_sge *ib_sge; | 118 | struct ib_sge *ib_sge; |
@@ -134,14 +133,13 @@ struct isert_cmd { | |||
134 | uint64_t write_va; | 133 | uint64_t write_va; |
135 | u64 pdu_buf_dma; | 134 | u64 pdu_buf_dma; |
136 | u32 pdu_buf_len; | 135 | u32 pdu_buf_len; |
137 | u32 read_va_off; | ||
138 | u32 write_va_off; | ||
139 | u32 rdma_wr_num; | ||
140 | struct isert_conn *conn; | 136 | struct isert_conn *conn; |
141 | struct iscsi_cmd *iscsi_cmd; | 137 | struct iscsi_cmd *iscsi_cmd; |
142 | struct iser_tx_desc tx_desc; | 138 | struct iser_tx_desc tx_desc; |
139 | struct iser_rx_desc *rx_desc; | ||
143 | struct isert_rdma_wr rdma_wr; | 140 | struct isert_rdma_wr rdma_wr; |
144 | struct work_struct comp_work; | 141 | struct work_struct comp_work; |
142 | struct scatterlist sg; | ||
145 | }; | 143 | }; |
146 | 144 | ||
147 | struct isert_device; | 145 | struct isert_device; |
@@ -159,11 +157,10 @@ struct isert_conn { | |||
159 | u64 login_req_dma; | 157 | u64 login_req_dma; |
160 | int login_req_len; | 158 | int login_req_len; |
161 | u64 login_rsp_dma; | 159 | u64 login_rsp_dma; |
162 | unsigned int rx_desc_head; | ||
163 | struct iser_rx_desc *rx_descs; | 160 | struct iser_rx_desc *rx_descs; |
164 | struct ib_recv_wr rx_wr[ISERT_MIN_POSTED_RX]; | 161 | struct ib_recv_wr rx_wr[ISERT_QP_MAX_RECV_DTOS]; |
165 | struct iscsi_conn *conn; | 162 | struct iscsi_conn *conn; |
166 | struct list_head accept_node; | 163 | struct list_head node; |
167 | struct completion login_comp; | 164 | struct completion login_comp; |
168 | struct completion login_req_comp; | 165 | struct completion login_req_comp; |
169 | struct iser_tx_desc login_tx_desc; | 166 | struct iser_tx_desc login_tx_desc; |
@@ -222,9 +219,9 @@ struct isert_device { | |||
222 | 219 | ||
223 | struct isert_np { | 220 | struct isert_np { |
224 | struct iscsi_np *np; | 221 | struct iscsi_np *np; |
225 | struct semaphore np_sem; | 222 | struct semaphore sem; |
226 | struct rdma_cm_id *np_cm_id; | 223 | struct rdma_cm_id *cm_id; |
227 | struct mutex np_accept_mutex; | 224 | struct mutex mutex; |
228 | struct list_head np_accept_list; | 225 | struct list_head accepted; |
229 | struct completion np_login_comp; | 226 | struct list_head pending; |
230 | }; | 227 | }; |
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 56eb471b5576..4215b5382092 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig | |||
@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY | |||
196 | config JOYSTICK_ZHENHUA | 196 | config JOYSTICK_ZHENHUA |
197 | tristate "5-byte Zhenhua RC transmitter" | 197 | tristate "5-byte Zhenhua RC transmitter" |
198 | select SERIO | 198 | select SERIO |
199 | select BITREVERSE | ||
199 | help | 200 | help |
200 | Say Y here if you have a Zhen Hua PPM-4CH transmitter which is | 201 | Say Y here if you have a Zhen Hua PPM-4CH transmitter which is |
201 | supplied with a ready to fly micro electric indoor helicopters | 202 | supplied with a ready to fly micro electric indoor helicopters |
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index b76ac580703c..a8bc2fe170dd 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c | |||
@@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data) | |||
150 | if (w->counter == 24) { /* full frame */ | 150 | if (w->counter == 24) { /* full frame */ |
151 | walkera0701_parse_frame(w); | 151 | walkera0701_parse_frame(w); |
152 | w->counter = NO_SYNC; | 152 | w->counter = NO_SYNC; |
153 | if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ | 153 | if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ |
154 | w->counter = 0; | 154 | w->counter = 0; |
155 | } else { | 155 | } else { |
156 | if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) | 156 | if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) |
@@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data) | |||
161 | } else | 161 | } else |
162 | w->counter = NO_SYNC; | 162 | w->counter = NO_SYNC; |
163 | } | 163 | } |
164 | } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) < | 164 | } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) < |
165 | RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */ | 165 | RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */ |
166 | w->counter = 0; | 166 | w->counter = 0; |
167 | 167 | ||
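The abs() to abs64() switch matters because the pulse-time delta is a 64-bit quantity; my reading of the change, not stated in the hunk, is that on 32-bit builds the old abs() macro evaluates at native word width, so the difference could be truncated before the comparison. In plain C the intended computation is simply:

#include <stdint.h>

static int64_t abs64_sketch(int64_t v)
{
        return v < 0 ? -v : v;
}

/* usage mirroring the driver: is this pulse within RESERVE of a sync pulse? */
static int is_sync_pulse(int64_t pulse_time, int64_t sync_pulse, int64_t reserve)
{
        return abs64_sketch(pulse_time - sync_pulse) < reserve;
}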
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index b052afec9a11..6639b2b8528a 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c | |||
@@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev) | |||
266 | 266 | ||
267 | error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); | 267 | error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); |
268 | if (error) | 268 | if (error) |
269 | return error; | 269 | goto err_free_keypad; |
270 | 270 | ||
271 | res = request_mem_region(res->start, resource_size(res), pdev->name); | 271 | res = request_mem_region(res->start, resource_size(res), pdev->name); |
272 | if (!res) { | 272 | if (!res) { |
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index 867db8a91372..e317b75357a0 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ b/drivers/input/misc/pm8941-pwrkey.c | |||
@@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb, | |||
93 | default: | 93 | default: |
94 | reset_type = PON_PS_HOLD_TYPE_HARD_RESET; | 94 | reset_type = PON_PS_HOLD_TYPE_HARD_RESET; |
95 | break; | 95 | break; |
96 | }; | 96 | } |
97 | 97 | ||
98 | error = regmap_update_bits(pwrkey->regmap, | 98 | error = regmap_update_bits(pwrkey->regmap, |
99 | pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, | 99 | pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, |
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 345df9b03aed..5adbcedcb81c 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
@@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev, | |||
414 | dev->id.product = user_dev->id.product; | 414 | dev->id.product = user_dev->id.product; |
415 | dev->id.version = user_dev->id.version; | 415 | dev->id.version = user_dev->id.version; |
416 | 416 | ||
417 | for_each_set_bit(i, dev->absbit, ABS_CNT) { | 417 | for (i = 0; i < ABS_CNT; i++) { |
418 | input_abs_set_max(dev, i, user_dev->absmax[i]); | 418 | input_abs_set_max(dev, i, user_dev->absmax[i]); |
419 | input_abs_set_min(dev, i, user_dev->absmin[i]); | 419 | input_abs_set_min(dev, i, user_dev->absmin[i]); |
420 | input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); | 420 | input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); |
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c index 5f191071d44a..e4eb048d1bf6 100644 --- a/drivers/input/mouse/cyapa_gen6.c +++ b/drivers/input/mouse/cyapa_gen6.c | |||
@@ -241,14 +241,10 @@ static int cyapa_gen6_read_sys_info(struct cyapa *cyapa) | |||
241 | memcpy(&cyapa->product_id[13], &resp_data[62], 2); | 241 | memcpy(&cyapa->product_id[13], &resp_data[62], 2); |
242 | cyapa->product_id[15] = '\0'; | 242 | cyapa->product_id[15] = '\0'; |
243 | 243 | ||
244 | /* Get the number of Rx electrodes. */ | ||
244 | rotat_align = resp_data[68]; | 245 | rotat_align = resp_data[68]; |
245 | if (rotat_align) { | 246 | cyapa->electrodes_rx = |
246 | cyapa->electrodes_rx = cyapa->electrodes_y; | 247 | rotat_align ? cyapa->electrodes_y : cyapa->electrodes_x; |
247 | cyapa->electrodes_rx = cyapa->electrodes_y; | ||
248 | } else { | ||
249 | cyapa->electrodes_rx = cyapa->electrodes_x; | ||
250 | cyapa->electrodes_rx = cyapa->electrodes_y; | ||
251 | } | ||
252 | cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u; | 248 | cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u; |
253 | 249 | ||
254 | if (!cyapa->electrodes_x || !cyapa->electrodes_y || | 250 | if (!cyapa->electrodes_x || !cyapa->electrodes_y || |
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 73670f2aebfd..c0ec26118732 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h | |||
@@ -60,7 +60,7 @@ struct elan_transport_ops { | |||
60 | int (*get_sm_version)(struct i2c_client *client, | 60 | int (*get_sm_version)(struct i2c_client *client, |
61 | u8* ic_type, u8 *version); | 61 | u8* ic_type, u8 *version); |
62 | int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); | 62 | int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); |
63 | int (*get_product_id)(struct i2c_client *client, u8 *id); | 63 | int (*get_product_id)(struct i2c_client *client, u16 *id); |
64 | 64 | ||
65 | int (*get_max)(struct i2c_client *client, | 65 | int (*get_max)(struct i2c_client *client, |
66 | unsigned int *max_x, unsigned int *max_y); | 66 | unsigned int *max_x, unsigned int *max_y); |
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index fa945304b9a5..5e1665bbaa0b 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include "elan_i2c.h" | 40 | #include "elan_i2c.h" |
41 | 41 | ||
42 | #define DRIVER_NAME "elan_i2c" | 42 | #define DRIVER_NAME "elan_i2c" |
43 | #define ELAN_DRIVER_VERSION "1.6.0" | 43 | #define ELAN_DRIVER_VERSION "1.6.1" |
44 | #define ETP_MAX_PRESSURE 255 | 44 | #define ETP_MAX_PRESSURE 255 |
45 | #define ETP_FWIDTH_REDUCE 90 | 45 | #define ETP_FWIDTH_REDUCE 90 |
46 | #define ETP_FINGER_WIDTH 15 | 46 | #define ETP_FINGER_WIDTH 15 |
@@ -76,7 +76,7 @@ struct elan_tp_data { | |||
76 | unsigned int x_res; | 76 | unsigned int x_res; |
77 | unsigned int y_res; | 77 | unsigned int y_res; |
78 | 78 | ||
79 | u8 product_id; | 79 | u16 product_id; |
80 | u8 fw_version; | 80 | u8 fw_version; |
81 | u8 sm_version; | 81 | u8 sm_version; |
82 | u8 iap_version; | 82 | u8 iap_version; |
@@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count, | |||
98 | u16 *signature_address) | 98 | u16 *signature_address) |
99 | { | 99 | { |
100 | switch (iap_version) { | 100 | switch (iap_version) { |
101 | case 0x00: | ||
102 | case 0x06: | ||
101 | case 0x08: | 103 | case 0x08: |
102 | *validpage_count = 512; | 104 | *validpage_count = 512; |
103 | break; | 105 | break; |
106 | case 0x03: | ||
107 | case 0x07: | ||
104 | case 0x09: | 108 | case 0x09: |
109 | case 0x0A: | ||
110 | case 0x0B: | ||
111 | case 0x0C: | ||
105 | *validpage_count = 768; | 112 | *validpage_count = 768; |
106 | break; | 113 | break; |
107 | case 0x0D: | 114 | case 0x0D: |
108 | *validpage_count = 896; | 115 | *validpage_count = 896; |
109 | break; | 116 | break; |
117 | case 0x0E: | ||
118 | *validpage_count = 640; | ||
119 | break; | ||
110 | default: | 120 | default: |
111 | /* unknown ic type clear value */ | 121 | /* unknown ic type clear value */ |
112 | *validpage_count = 0; | 122 | *validpage_count = 0; |
@@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data) | |||
266 | 276 | ||
267 | error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, | 277 | error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, |
268 | &data->fw_signature_address); | 278 | &data->fw_signature_address); |
269 | if (error) { | 279 | if (error) |
270 | dev_err(&data->client->dev, | 280 | dev_warn(&data->client->dev, |
271 | "unknown iap version %d\n", data->iap_version); | 281 | "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n", |
272 | return error; | 282 | data->iap_version, data->ic_type); |
273 | } | ||
274 | 283 | ||
275 | return 0; | 284 | return 0; |
276 | } | 285 | } |
@@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev, | |||
486 | const u8 *fw_signature; | 495 | const u8 *fw_signature; |
487 | static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; | 496 | static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; |
488 | 497 | ||
498 | if (data->fw_validpage_count == 0) | ||
499 | return -EINVAL; | ||
500 | |||
489 | /* Look for a firmware with the product id appended. */ | 501 | /* Look for a firmware with the product id appended. */ |
490 | fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); | 502 | fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); |
491 | if (!fw_name) { | 503 | if (!fw_name) { |
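In the elan_i2c_core.c hunks above, an unknown IAP version is now only a warning at probe time, but the sysfs firmware-update path refuses to run once fw_validpage_count is left at zero. A hedged sketch of that "degrade instead of fail" pattern, using illustrative names rather than the driver's own:

    /* Sketch: warn at probe, keep the device usable, gate the update path. */
    #include <errno.h>
    #include <stdio.h>

    struct fw_info { unsigned int validpage_count; };

    static void probe_fw_info(struct fw_info *info, int lookup_failed)
    {
        if (lookup_failed) {
            fprintf(stderr, "unexpected IC type, firmware update disabled\n");
            info->validpage_count = 0;   /* touchpad itself keeps working */
        }
    }

    static int update_fw(const struct fw_info *info)
    {
        if (info->validpage_count == 0)
            return -EINVAL;              /* refuse rather than flash blindly */
        return 0;                        /* ... proceed with the update ... */
    }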
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 683c840c9dd7..a679e56c44cd 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c | |||
@@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, | |||
276 | return 0; | 276 | return 0; |
277 | } | 277 | } |
278 | 278 | ||
279 | static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) | 279 | static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id) |
280 | { | 280 | { |
281 | int error; | 281 | int error; |
282 | u8 val[3]; | 282 | u8 val[3]; |
@@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) | |||
287 | return error; | 287 | return error; |
288 | } | 288 | } |
289 | 289 | ||
290 | *id = val[0]; | 290 | *id = le16_to_cpup((__le16 *)val); |
291 | return 0; | 291 | return 0; |
292 | } | 292 | } |
293 | 293 | ||
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index ff36a366b2aa..cb6aecbc1dc2 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c | |||
@@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, | |||
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
186 | static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) | 186 | static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) |
187 | { | 187 | { |
188 | int error; | 188 | int error; |
189 | u8 val[3]; | 189 | u8 val[3]; |
@@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) | |||
195 | return error; | 195 | return error; |
196 | } | 196 | } |
197 | 197 | ||
198 | *id = val[1]; | 198 | *id = be16_to_cpup((__be16 *)val); |
199 | return 0; | 199 | return 0; |
200 | } | 200 | } |
201 | 201 | ||
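The product-id getters in the two Elan transports now return a 16-bit value, and the hunks above show they decode it differently: over plain I2C the two bytes arrive little-endian, over SMBus big-endian (previously each path kept only a single byte). A small self-contained sketch of the two decodes, with the byte offsets taken from the diff:

    /* Sketch: decoding a 16-bit product id from a 3-byte response buffer. */
    #include <stdint.h>

    static uint16_t load16_le(const uint8_t *p)   /* I2C path */
    {
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
    }

    static uint16_t load16_be(const uint8_t *p)   /* SMBus path */
    {
        return ((uint16_t)p[0] << 8) | (uint16_t)p[1];
    }

    uint16_t product_id_i2c(const uint8_t val[3])   { return load16_le(val); }
    uint16_t product_id_smbus(const uint8_t val[3]) { return load16_be(val); }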
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 994ae7886156..6025eb430c0a 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse) | |||
519 | struct synaptics_data *priv = psmouse->private; | 519 | struct synaptics_data *priv = psmouse->private; |
520 | 520 | ||
521 | priv->mode = 0; | 521 | priv->mode = 0; |
522 | 522 | if (priv->absolute_mode) | |
523 | if (priv->absolute_mode) { | ||
524 | priv->mode |= SYN_BIT_ABSOLUTE_MODE; | 523 | priv->mode |= SYN_BIT_ABSOLUTE_MODE; |
525 | if (SYN_CAP_EXTENDED(priv->capabilities)) | 524 | if (priv->disable_gesture) |
526 | priv->mode |= SYN_BIT_W_MODE; | ||
527 | } | ||
528 | |||
529 | if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture) | ||
530 | priv->mode |= SYN_BIT_DISABLE_GESTURE; | 525 | priv->mode |= SYN_BIT_DISABLE_GESTURE; |
531 | |||
532 | if (psmouse->rate >= 80) | 526 | if (psmouse->rate >= 80) |
533 | priv->mode |= SYN_BIT_HIGH_RATE; | 527 | priv->mode |= SYN_BIT_HIGH_RATE; |
528 | if (SYN_CAP_EXTENDED(priv->capabilities)) | ||
529 | priv->mode |= SYN_BIT_W_MODE; | ||
534 | 530 | ||
535 | if (synaptics_mode_cmd(psmouse, priv->mode)) | 531 | if (synaptics_mode_cmd(psmouse, priv->mode)) |
536 | return -1; | 532 | return -1; |
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 75516996db20..316f2c897101 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c | |||
@@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) | |||
212 | * time before the ACK arrives. | 212 | * time before the ACK arrives. |
213 | */ | 213 | */ |
214 | if (ps2_sendbyte(ps2dev, command & 0xff, | 214 | if (ps2_sendbyte(ps2dev, command & 0xff, |
215 | command == PS2_CMD_RESET_BAT ? 1000 : 200)) | 215 | command == PS2_CMD_RESET_BAT ? 1000 : 200)) { |
216 | goto out; | 216 | serio_pause_rx(ps2dev->serio); |
217 | goto out_reset_flags; | ||
218 | } | ||
217 | 219 | ||
218 | for (i = 0; i < send; i++) | 220 | for (i = 0; i < send; i++) { |
219 | if (ps2_sendbyte(ps2dev, param[i], 200)) | 221 | if (ps2_sendbyte(ps2dev, param[i], 200)) { |
220 | goto out; | 222 | serio_pause_rx(ps2dev->serio); |
223 | goto out_reset_flags; | ||
224 | } | ||
225 | } | ||
221 | 226 | ||
222 | /* | 227 | /* |
223 | * The reset command takes a long time to execute. | 228 | * The reset command takes a long time to execute. |
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) | |||
234 | !(ps2dev->flags & PS2_FLAG_CMD), timeout); | 239 | !(ps2dev->flags & PS2_FLAG_CMD), timeout); |
235 | } | 240 | } |
236 | 241 | ||
242 | serio_pause_rx(ps2dev->serio); | ||
243 | |||
237 | if (param) | 244 | if (param) |
238 | for (i = 0; i < receive; i++) | 245 | for (i = 0; i < receive; i++) |
239 | param[i] = ps2dev->cmdbuf[(receive - 1) - i]; | 246 | param[i] = ps2dev->cmdbuf[(receive - 1) - i]; |
240 | 247 | ||
241 | if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) | 248 | if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) |
242 | goto out; | 249 | goto out_reset_flags; |
243 | 250 | ||
244 | rc = 0; | 251 | rc = 0; |
245 | 252 | ||
246 | out: | 253 | out_reset_flags: |
247 | serio_pause_rx(ps2dev->serio); | ||
248 | ps2dev->flags = 0; | 254 | ps2dev->flags = 0; |
249 | serio_continue_rx(ps2dev->serio); | 255 | serio_continue_rx(ps2dev->serio); |
250 | 256 | ||
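The libps2.c hunks above rework the error paths so that every early exit pauses the serio receive side before jumping to the (renamed) out_reset_flags label; the command flags are then always cleared with the receiver quiesced. A rough, self-contained sketch of that control-flow shape only, with stand-in predicates (not the real serio primitives):

    /* Sketch: pause RX before every exit so flag reset never races RX. */
    #include <stdio.h>

    static int  send_failed(void)  { return 0; }
    static int  wait_failed(void)  { return 0; }
    static void pause_rx(void)     { puts("pause rx"); }
    static void resume_rx(void)    { puts("resume rx"); }
    static void clear_flags(void)  { puts("clear flags"); }

    int ps2_command_shape(void)
    {
        int rc = -1;

        if (send_failed()) {
            pause_rx();
            goto out_reset_flags;
        }
        if (wait_failed()) {
            pause_rx();
            goto out_reset_flags;
        }
        pause_rx();                 /* success path pauses as well */
        rc = 0;

    out_reset_flags:
        clear_flags();              /* always runs with RX paused */
        resume_rx();
        return rc;
    }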
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c index 26b45936f9fd..1e8cd6f1fe9e 100644 --- a/drivers/input/serio/parkbd.c +++ b/drivers/input/serio/parkbd.c | |||
@@ -194,6 +194,7 @@ static int __init parkbd_init(void) | |||
194 | parkbd_port = parkbd_allocate_serio(); | 194 | parkbd_port = parkbd_allocate_serio(); |
195 | if (!parkbd_port) { | 195 | if (!parkbd_port) { |
196 | parport_release(parkbd_dev); | 196 | parport_release(parkbd_dev); |
197 | parport_unregister_device(parkbd_dev); | ||
197 | return -ENOMEM; | 198 | return -ENOMEM; |
198 | } | 199 | } |
199 | 200 | ||
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 0f5f968592bd..04edc8f7122f 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -668,18 +668,22 @@ static int ads7846_no_filter(void *ads, int data_idx, int *val) | |||
668 | 668 | ||
669 | static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m) | 669 | static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m) |
670 | { | 670 | { |
671 | int value; | ||
671 | struct spi_transfer *t = | 672 | struct spi_transfer *t = |
672 | list_entry(m->transfers.prev, struct spi_transfer, transfer_list); | 673 | list_entry(m->transfers.prev, struct spi_transfer, transfer_list); |
673 | 674 | ||
674 | if (ts->model == 7845) { | 675 | if (ts->model == 7845) { |
675 | return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3; | 676 | value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1])); |
676 | } else { | 677 | } else { |
677 | /* | 678 | /* |
678 | * adjust: on-wire is a must-ignore bit, a BE12 value, then | 679 | * adjust: on-wire is a must-ignore bit, a BE12 value, then |
679 | * padding; built from two 8 bit values written msb-first. | 680 | * padding; built from two 8 bit values written msb-first. |
680 | */ | 681 | */ |
681 | return be16_to_cpup((__be16 *)t->rx_buf) >> 3; | 682 | value = be16_to_cpup((__be16 *)t->rx_buf); |
682 | } | 683 | } |
684 | |||
685 | /* enforce ADC output is 12 bits width */ | ||
686 | return (value >> 3) & 0xfff; | ||
683 | } | 687 | } |
684 | 688 | ||
685 | static void ads7846_update_value(struct spi_message *m, int val) | 689 | static void ads7846_update_value(struct spi_message *m, int val) |
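The ads7846 hunk above makes the 12-bit clamp explicit for both read paths: the raw big-endian sample is shifted past its padding bits and then masked to 12 bits, so stray high bits cannot leak into the reported value. A standalone sketch, with buffer offsets as in the diff:

    /* Sketch: extracting a 12-bit ADC sample from a big-endian wire word. */
    #include <stdint.h>

    static uint16_t be16_load(const uint8_t *p)
    {
        return ((uint16_t)p[0] << 8) | (uint16_t)p[1];
    }

    int adc_sample_12bit(const uint8_t *rx_buf, int is_7845)
    {
        uint16_t value;

        if (is_7845)
            value = be16_load(&rx_buf[1]);   /* 7845: value starts one byte in */
        else
            value = be16_load(&rx_buf[0]);   /* others: ignore bit + BE12 + pad */

        return (value >> 3) & 0xfff;         /* drop padding, enforce 12 bits */
    }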
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c index ff0b75813daa..8275267eac25 100644 --- a/drivers/input/touchscreen/imx6ul_tsc.c +++ b/drivers/input/touchscreen/imx6ul_tsc.c | |||
@@ -94,7 +94,7 @@ struct imx6ul_tsc { | |||
94 | * TSC module need ADC to get the measure value. So | 94 | * TSC module need ADC to get the measure value. So |
95 | * before config TSC, we should initialize ADC module. | 95 | * before config TSC, we should initialize ADC module. |
96 | */ | 96 | */ |
97 | static void imx6ul_adc_init(struct imx6ul_tsc *tsc) | 97 | static int imx6ul_adc_init(struct imx6ul_tsc *tsc) |
98 | { | 98 | { |
99 | int adc_hc = 0; | 99 | int adc_hc = 0; |
100 | int adc_gc; | 100 | int adc_gc; |
@@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc) | |||
122 | 122 | ||
123 | timeout = wait_for_completion_timeout | 123 | timeout = wait_for_completion_timeout |
124 | (&tsc->completion, ADC_TIMEOUT); | 124 | (&tsc->completion, ADC_TIMEOUT); |
125 | if (timeout == 0) | 125 | if (timeout == 0) { |
126 | dev_err(tsc->dev, "Timeout for adc calibration\n"); | 126 | dev_err(tsc->dev, "Timeout for adc calibration\n"); |
127 | return -ETIMEDOUT; | ||
128 | } | ||
127 | 129 | ||
128 | adc_gs = readl(tsc->adc_regs + REG_ADC_GS); | 130 | adc_gs = readl(tsc->adc_regs + REG_ADC_GS); |
129 | if (adc_gs & ADC_CALF) | 131 | if (adc_gs & ADC_CALF) { |
130 | dev_err(tsc->dev, "ADC calibration failed\n"); | 132 | dev_err(tsc->dev, "ADC calibration failed\n"); |
133 | return -EINVAL; | ||
134 | } | ||
131 | 135 | ||
132 | /* TSC need the ADC work in hardware trigger */ | 136 | /* TSC need the ADC work in hardware trigger */ |
133 | adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); | 137 | adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); |
134 | adc_cfg |= ADC_HARDWARE_TRIGGER; | 138 | adc_cfg |= ADC_HARDWARE_TRIGGER; |
135 | writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); | 139 | writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); |
140 | |||
141 | return 0; | ||
136 | } | 142 | } |
137 | 143 | ||
138 | /* | 144 | /* |
@@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc) | |||
188 | writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); | 194 | writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); |
189 | } | 195 | } |
190 | 196 | ||
191 | static void imx6ul_tsc_init(struct imx6ul_tsc *tsc) | 197 | static int imx6ul_tsc_init(struct imx6ul_tsc *tsc) |
192 | { | 198 | { |
193 | imx6ul_adc_init(tsc); | 199 | int err; |
200 | |||
201 | err = imx6ul_adc_init(tsc); | ||
202 | if (err) | ||
203 | return err; | ||
194 | imx6ul_tsc_channel_config(tsc); | 204 | imx6ul_tsc_channel_config(tsc); |
195 | imx6ul_tsc_set(tsc); | 205 | imx6ul_tsc_set(tsc); |
206 | |||
207 | return 0; | ||
196 | } | 208 | } |
197 | 209 | ||
198 | static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) | 210 | static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) |
@@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev) | |||
311 | return err; | 323 | return err; |
312 | } | 324 | } |
313 | 325 | ||
314 | imx6ul_tsc_init(tsc); | 326 | return imx6ul_tsc_init(tsc); |
315 | |||
316 | return 0; | ||
317 | } | 327 | } |
318 | 328 | ||
319 | static void imx6ul_tsc_close(struct input_dev *input_dev) | 329 | static void imx6ul_tsc_close(struct input_dev *input_dev) |
@@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) | |||
337 | int tsc_irq; | 347 | int tsc_irq; |
338 | int adc_irq; | 348 | int adc_irq; |
339 | 349 | ||
340 | tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL); | 350 | tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL); |
341 | if (!tsc) | 351 | if (!tsc) |
342 | return -ENOMEM; | 352 | return -ENOMEM; |
343 | 353 | ||
@@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) | |||
345 | if (!input_dev) | 355 | if (!input_dev) |
346 | return -ENOMEM; | 356 | return -ENOMEM; |
347 | 357 | ||
348 | input_dev->name = "iMX6UL TouchScreen Controller"; | 358 | input_dev->name = "iMX6UL Touchscreen Controller"; |
349 | input_dev->id.bustype = BUS_HOST; | 359 | input_dev->id.bustype = BUS_HOST; |
350 | 360 | ||
351 | input_dev->open = imx6ul_tsc_open; | 361 | input_dev->open = imx6ul_tsc_open; |
@@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) | |||
406 | } | 416 | } |
407 | 417 | ||
408 | adc_irq = platform_get_irq(pdev, 1); | 418 | adc_irq = platform_get_irq(pdev, 1); |
409 | if (adc_irq <= 0) { | 419 | if (adc_irq < 0) { |
410 | dev_err(&pdev->dev, "no adc irq resource?\n"); | 420 | dev_err(&pdev->dev, "no adc irq resource?\n"); |
411 | return adc_irq; | 421 | return adc_irq; |
412 | } | 422 | } |
@@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev) | |||
491 | goto out; | 501 | goto out; |
492 | } | 502 | } |
493 | 503 | ||
494 | imx6ul_tsc_init(tsc); | 504 | retval = imx6ul_tsc_init(tsc); |
495 | } | 505 | } |
496 | 506 | ||
497 | out: | 507 | out: |
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 7cce87650fc8..1fafc9f57af6 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c | |||
@@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev) | |||
394 | if (of_property_read_u32(np, "x-size", &pdata->x_size)) { | 394 | if (of_property_read_u32(np, "x-size", &pdata->x_size)) { |
395 | dev_err(dev, "failed to get x-size property\n"); | 395 | dev_err(dev, "failed to get x-size property\n"); |
396 | return NULL; | 396 | return NULL; |
397 | }; | 397 | } |
398 | 398 | ||
399 | if (of_property_read_u32(np, "y-size", &pdata->y_size)) { | 399 | if (of_property_read_u32(np, "y-size", &pdata->y_size)) { |
400 | dev_err(dev, "failed to get y-size property\n"); | 400 | dev_err(dev, "failed to get y-size property\n"); |
401 | return NULL; | 401 | return NULL; |
402 | }; | 402 | } |
403 | 403 | ||
404 | of_property_read_u32(np, "contact-threshold", | 404 | of_property_read_u32(np, "contact-threshold", |
405 | &pdata->contact_threshold); | 405 | &pdata->contact_threshold); |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 4664c2a96c67..cbe6a890a93a 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -23,8 +23,7 @@ config IOMMU_IO_PGTABLE | |||
23 | config IOMMU_IO_PGTABLE_LPAE | 23 | config IOMMU_IO_PGTABLE_LPAE |
24 | bool "ARMv7/v8 Long Descriptor Format" | 24 | bool "ARMv7/v8 Long Descriptor Format" |
25 | select IOMMU_IO_PGTABLE | 25 | select IOMMU_IO_PGTABLE |
26 | # SWIOTLB guarantees a dma_to_phys() implementation | 26 | depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST) |
27 | depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB) | ||
28 | help | 27 | help |
29 | Enable support for the ARM long descriptor pagetable format. | 28 | Enable support for the ARM long descriptor pagetable format. |
30 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page | 29 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page |
@@ -43,7 +42,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST | |||
43 | endmenu | 42 | endmenu |
44 | 43 | ||
45 | config IOMMU_IOVA | 44 | config IOMMU_IOVA |
46 | bool | 45 | tristate |
47 | 46 | ||
48 | config OF_IOMMU | 47 | config OF_IOMMU |
49 | def_bool y | 48 | def_bool y |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index f82060e778a2..08d2775887f7 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -2006,6 +2006,15 @@ static void do_detach(struct iommu_dev_data *dev_data) | |||
2006 | { | 2006 | { |
2007 | struct amd_iommu *iommu; | 2007 | struct amd_iommu *iommu; |
2008 | 2008 | ||
2009 | /* | ||
2010 | * First check if the device is still attached. It might already | ||
2011 | * be detached from its domain because the generic | ||
2012 | * iommu_detach_group code detached it and we try again here in | ||
2013 | * our alias handling. | ||
2014 | */ | ||
2015 | if (!dev_data->domain) | ||
2016 | return; | ||
2017 | |||
2009 | iommu = amd_iommu_rlookup_table[dev_data->devid]; | 2018 | iommu = amd_iommu_rlookup_table[dev_data->devid]; |
2010 | 2019 | ||
2011 | /* decrease reference counters */ | 2020 | /* decrease reference counters */ |
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 5ef347a13cb5..1b066e7d144d 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
@@ -1256,6 +1256,9 @@ static int iommu_init_pci(struct amd_iommu *iommu) | |||
1256 | if (!iommu->dev) | 1256 | if (!iommu->dev) |
1257 | return -ENODEV; | 1257 | return -ENODEV; |
1258 | 1258 | ||
1259 | /* Prevent binding other PCI device drivers to IOMMU devices */ | ||
1260 | iommu->dev->match_driver = false; | ||
1261 | |||
1259 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, | 1262 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, |
1260 | &iommu->cap); | 1263 | &iommu->cap); |
1261 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, | 1264 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index dafaf59dc3b8..286e890e7d64 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #define IDR0_TTF_SHIFT 2 | 56 | #define IDR0_TTF_SHIFT 2 |
57 | #define IDR0_TTF_MASK 0x3 | 57 | #define IDR0_TTF_MASK 0x3 |
58 | #define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT) | 58 | #define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT) |
59 | #define IDR0_TTF_AARCH32_64 (3 << IDR0_TTF_SHIFT) | ||
59 | #define IDR0_S1P (1 << 1) | 60 | #define IDR0_S1P (1 << 1) |
60 | #define IDR0_S2P (1 << 0) | 61 | #define IDR0_S2P (1 << 0) |
61 | 62 | ||
@@ -342,7 +343,8 @@ | |||
342 | #define CMDQ_TLBI_0_VMID_SHIFT 32 | 343 | #define CMDQ_TLBI_0_VMID_SHIFT 32 |
343 | #define CMDQ_TLBI_0_ASID_SHIFT 48 | 344 | #define CMDQ_TLBI_0_ASID_SHIFT 48 |
344 | #define CMDQ_TLBI_1_LEAF (1UL << 0) | 345 | #define CMDQ_TLBI_1_LEAF (1UL << 0) |
345 | #define CMDQ_TLBI_1_ADDR_MASK ~0xfffUL | 346 | #define CMDQ_TLBI_1_VA_MASK ~0xfffUL |
347 | #define CMDQ_TLBI_1_IPA_MASK 0xfffffffff000UL | ||
346 | 348 | ||
347 | #define CMDQ_PRI_0_SSID_SHIFT 12 | 349 | #define CMDQ_PRI_0_SSID_SHIFT 12 |
348 | #define CMDQ_PRI_0_SSID_MASK 0xfffffUL | 350 | #define CMDQ_PRI_0_SSID_MASK 0xfffffUL |
@@ -770,11 +772,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) | |||
770 | break; | 772 | break; |
771 | case CMDQ_OP_TLBI_NH_VA: | 773 | case CMDQ_OP_TLBI_NH_VA: |
772 | cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; | 774 | cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; |
773 | /* Fallthrough */ | 775 | cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; |
776 | cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; | ||
777 | break; | ||
774 | case CMDQ_OP_TLBI_S2_IPA: | 778 | case CMDQ_OP_TLBI_S2_IPA: |
775 | cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT; | 779 | cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT; |
776 | cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; | 780 | cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; |
777 | cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK; | 781 | cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; |
778 | break; | 782 | break; |
779 | case CMDQ_OP_TLBI_NH_ASID: | 783 | case CMDQ_OP_TLBI_NH_ASID: |
780 | cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; | 784 | cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; |
@@ -2460,7 +2464,13 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) | |||
2460 | } | 2464 | } |
2461 | 2465 | ||
2462 | /* We only support the AArch64 table format at present */ | 2466 | /* We only support the AArch64 table format at present */ |
2463 | if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) { | 2467 | switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) { |
2468 | case IDR0_TTF_AARCH32_64: | ||
2469 | smmu->ias = 40; | ||
2470 | /* Fallthrough */ | ||
2471 | case IDR0_TTF_AARCH64: | ||
2472 | break; | ||
2473 | default: | ||
2464 | dev_err(smmu->dev, "AArch64 table format not supported!\n"); | 2474 | dev_err(smmu->dev, "AArch64 table format not supported!\n"); |
2465 | return -ENXIO; | 2475 | return -ENXIO; |
2466 | } | 2476 | } |
@@ -2541,8 +2551,7 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) | |||
2541 | dev_warn(smmu->dev, | 2551 | dev_warn(smmu->dev, |
2542 | "failed to set DMA mask for table walker\n"); | 2552 | "failed to set DMA mask for table walker\n"); |
2543 | 2553 | ||
2544 | if (!smmu->ias) | 2554 | smmu->ias = max(smmu->ias, smmu->oas); |
2545 | smmu->ias = smmu->oas; | ||
2546 | 2555 | ||
2547 | dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", | 2556 | dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", |
2548 | smmu->ias, smmu->oas, smmu->features); | 2557 | smmu->ias, smmu->oas, smmu->features); |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2d7349a3ee14..35365f046923 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -2301,6 +2301,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, | |||
2301 | 2301 | ||
2302 | if (ret) { | 2302 | if (ret) { |
2303 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2303 | spin_unlock_irqrestore(&device_domain_lock, flags); |
2304 | free_devinfo_mem(info); | ||
2304 | return NULL; | 2305 | return NULL; |
2305 | } | 2306 | } |
2306 | 2307 | ||
@@ -3215,6 +3216,8 @@ static struct iova *intel_alloc_iova(struct device *dev, | |||
3215 | 3216 | ||
3216 | /* Restrict dma_mask to the width that the iommu can handle */ | 3217 | /* Restrict dma_mask to the width that the iommu can handle */ |
3217 | dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); | 3218 | dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); |
3219 | /* Ensure we reserve the whole size-aligned region */ | ||
3220 | nrpages = __roundup_pow_of_two(nrpages); | ||
3218 | 3221 | ||
3219 | if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { | 3222 | if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { |
3220 | /* | 3223 | /* |
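Since the IOVA allocator no longer rounds a size-aligned request up to a power of two itself (see the iova.c hunks further down), intel_alloc_iova() now reserves the whole size-aligned region by rounding nrpages up before asking for it. The arithmetic, as a small sketch:

    /* Sketch: round a page count up to the next power of two before
     * requesting a size-aligned IOVA range. */
    #include <stdint.h>

    static uint64_t roundup_pow_of_two_u64(uint64_t n)
    {
        uint64_t p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    /* e.g. a 5-page mapping reserves an 8-page, naturally aligned region */
    uint64_t iova_request_pages(uint64_t nrpages)
    {
        return roundup_pow_of_two_u64(nrpages);
    }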
@@ -3711,7 +3714,7 @@ static inline int iommu_devinfo_cache_init(void) | |||
3711 | static int __init iommu_init_mempool(void) | 3714 | static int __init iommu_init_mempool(void) |
3712 | { | 3715 | { |
3713 | int ret; | 3716 | int ret; |
3714 | ret = iommu_iova_cache_init(); | 3717 | ret = iova_cache_get(); |
3715 | if (ret) | 3718 | if (ret) |
3716 | return ret; | 3719 | return ret; |
3717 | 3720 | ||
@@ -3725,7 +3728,7 @@ static int __init iommu_init_mempool(void) | |||
3725 | 3728 | ||
3726 | kmem_cache_destroy(iommu_domain_cache); | 3729 | kmem_cache_destroy(iommu_domain_cache); |
3727 | domain_error: | 3730 | domain_error: |
3728 | iommu_iova_cache_destroy(); | 3731 | iova_cache_put(); |
3729 | 3732 | ||
3730 | return -ENOMEM; | 3733 | return -ENOMEM; |
3731 | } | 3734 | } |
@@ -3734,7 +3737,7 @@ static void __init iommu_exit_mempool(void) | |||
3734 | { | 3737 | { |
3735 | kmem_cache_destroy(iommu_devinfo_cache); | 3738 | kmem_cache_destroy(iommu_devinfo_cache); |
3736 | kmem_cache_destroy(iommu_domain_cache); | 3739 | kmem_cache_destroy(iommu_domain_cache); |
3737 | iommu_iova_cache_destroy(); | 3740 | iova_cache_put(); |
3738 | } | 3741 | } |
3739 | 3742 | ||
3740 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) | 3743 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 73c07482f487..7df97777662d 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
@@ -202,9 +202,9 @@ typedef u64 arm_lpae_iopte; | |||
202 | 202 | ||
203 | static bool selftest_running = false; | 203 | static bool selftest_running = false; |
204 | 204 | ||
205 | static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages) | 205 | static dma_addr_t __arm_lpae_dma_addr(void *pages) |
206 | { | 206 | { |
207 | return phys_to_dma(dev, virt_to_phys(pages)); | 207 | return (dma_addr_t)virt_to_phys(pages); |
208 | } | 208 | } |
209 | 209 | ||
210 | static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, | 210 | static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, |
@@ -223,10 +223,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, | |||
223 | goto out_free; | 223 | goto out_free; |
224 | /* | 224 | /* |
225 | * We depend on the IOMMU being able to work with any physical | 225 | * We depend on the IOMMU being able to work with any physical |
226 | * address directly, so if the DMA layer suggests it can't by | 226 | * address directly, so if the DMA layer suggests otherwise by |
227 | * giving us back some translation, that bodes very badly... | 227 | * translating or truncating them, that bodes very badly... |
228 | */ | 228 | */ |
229 | if (dma != __arm_lpae_dma_addr(dev, pages)) | 229 | if (dma != virt_to_phys(pages)) |
230 | goto out_unmap; | 230 | goto out_unmap; |
231 | } | 231 | } |
232 | 232 | ||
@@ -243,10 +243,8 @@ out_free: | |||
243 | static void __arm_lpae_free_pages(void *pages, size_t size, | 243 | static void __arm_lpae_free_pages(void *pages, size_t size, |
244 | struct io_pgtable_cfg *cfg) | 244 | struct io_pgtable_cfg *cfg) |
245 | { | 245 | { |
246 | struct device *dev = cfg->iommu_dev; | ||
247 | |||
248 | if (!selftest_running) | 246 | if (!selftest_running) |
249 | dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages), | 247 | dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), |
250 | size, DMA_TO_DEVICE); | 248 | size, DMA_TO_DEVICE); |
251 | free_pages_exact(pages, size); | 249 | free_pages_exact(pages, size); |
252 | } | 250 | } |
@@ -254,12 +252,11 @@ static void __arm_lpae_free_pages(void *pages, size_t size, | |||
254 | static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, | 252 | static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, |
255 | struct io_pgtable_cfg *cfg) | 253 | struct io_pgtable_cfg *cfg) |
256 | { | 254 | { |
257 | struct device *dev = cfg->iommu_dev; | ||
258 | |||
259 | *ptep = pte; | 255 | *ptep = pte; |
260 | 256 | ||
261 | if (!selftest_running) | 257 | if (!selftest_running) |
262 | dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep), | 258 | dma_sync_single_for_device(cfg->iommu_dev, |
259 | __arm_lpae_dma_addr(ptep), | ||
263 | sizeof(pte), DMA_TO_DEVICE); | 260 | sizeof(pte), DMA_TO_DEVICE); |
264 | } | 261 | } |
265 | 262 | ||
@@ -629,6 +626,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) | |||
629 | if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) | 626 | if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) |
630 | return NULL; | 627 | return NULL; |
631 | 628 | ||
629 | if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) { | ||
630 | dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n"); | ||
631 | return NULL; | ||
632 | } | ||
633 | |||
632 | data = kmalloc(sizeof(*data), GFP_KERNEL); | 634 | data = kmalloc(sizeof(*data), GFP_KERNEL); |
633 | if (!data) | 635 | if (!data) |
634 | return NULL; | 636 | return NULL; |
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index b7c3d923f3e1..fa0adef32bd6 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c | |||
@@ -18,42 +18,9 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/iova.h> | 20 | #include <linux/iova.h> |
21 | #include <linux/module.h> | ||
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
22 | 23 | ||
23 | static struct kmem_cache *iommu_iova_cache; | ||
24 | |||
25 | int iommu_iova_cache_init(void) | ||
26 | { | ||
27 | int ret = 0; | ||
28 | |||
29 | iommu_iova_cache = kmem_cache_create("iommu_iova", | ||
30 | sizeof(struct iova), | ||
31 | 0, | ||
32 | SLAB_HWCACHE_ALIGN, | ||
33 | NULL); | ||
34 | if (!iommu_iova_cache) { | ||
35 | pr_err("Couldn't create iova cache\n"); | ||
36 | ret = -ENOMEM; | ||
37 | } | ||
38 | |||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | void iommu_iova_cache_destroy(void) | ||
43 | { | ||
44 | kmem_cache_destroy(iommu_iova_cache); | ||
45 | } | ||
46 | |||
47 | struct iova *alloc_iova_mem(void) | ||
48 | { | ||
49 | return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); | ||
50 | } | ||
51 | |||
52 | void free_iova_mem(struct iova *iova) | ||
53 | { | ||
54 | kmem_cache_free(iommu_iova_cache, iova); | ||
55 | } | ||
56 | |||
57 | void | 24 | void |
58 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, | 25 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, |
59 | unsigned long start_pfn, unsigned long pfn_32bit) | 26 | unsigned long start_pfn, unsigned long pfn_32bit) |
@@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, | |||
72 | iovad->start_pfn = start_pfn; | 39 | iovad->start_pfn = start_pfn; |
73 | iovad->dma_32bit_pfn = pfn_32bit; | 40 | iovad->dma_32bit_pfn = pfn_32bit; |
74 | } | 41 | } |
42 | EXPORT_SYMBOL_GPL(init_iova_domain); | ||
75 | 43 | ||
76 | static struct rb_node * | 44 | static struct rb_node * |
77 | __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) | 45 | __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) |
@@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) | |||
120 | } | 88 | } |
121 | } | 89 | } |
122 | 90 | ||
123 | /* Computes the padding size required, to make the | 91 | /* |
124 | * the start address naturally aligned on its size | 92 | * Computes the padding size required, to make the start address |
93 | * naturally aligned on the power-of-two order of its size | ||
125 | */ | 94 | */ |
126 | static int | 95 | static unsigned int |
127 | iova_get_pad_size(int size, unsigned int limit_pfn) | 96 | iova_get_pad_size(unsigned int size, unsigned int limit_pfn) |
128 | { | 97 | { |
129 | unsigned int pad_size = 0; | 98 | return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1); |
130 | unsigned int order = ilog2(size); | ||
131 | |||
132 | if (order) | ||
133 | pad_size = (limit_pfn + 1) % (1 << order); | ||
134 | |||
135 | return pad_size; | ||
136 | } | 99 | } |
137 | 100 | ||
138 | static int __alloc_and_insert_iova_range(struct iova_domain *iovad, | 101 | static int __alloc_and_insert_iova_range(struct iova_domain *iovad, |
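The rewritten iova_get_pad_size() above computes, in one expression, how many PFNs must be left unused below limit_pfn so that an allocation carved downward from limit_pfn ends up naturally aligned on the power-of-two order of its size. A standalone check of the formula (pure userspace sketch):

    /* Sketch: start = limit_pfn + 1 - size - pad is aligned to
     * roundup_pow_of_two(size). */
    #include <assert.h>

    static unsigned int roundup_pow2(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    static unsigned int pad_size(unsigned int size, unsigned int limit_pfn)
    {
        return (limit_pfn + 1 - size) & (roundup_pow2(size) - 1);
    }

    int main(void)
    {
        unsigned int size = 5, limit_pfn = 99;
        unsigned int start = limit_pfn + 1 - size - pad_size(size, limit_pfn);

        assert(start % roundup_pow2(size) == 0);   /* aligned to 8 here */
        return 0;
    }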
@@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) | |||
242 | rb_insert_color(&iova->node, root); | 205 | rb_insert_color(&iova->node, root); |
243 | } | 206 | } |
244 | 207 | ||
208 | static struct kmem_cache *iova_cache; | ||
209 | static unsigned int iova_cache_users; | ||
210 | static DEFINE_MUTEX(iova_cache_mutex); | ||
211 | |||
212 | struct iova *alloc_iova_mem(void) | ||
213 | { | ||
214 | return kmem_cache_alloc(iova_cache, GFP_ATOMIC); | ||
215 | } | ||
216 | EXPORT_SYMBOL(alloc_iova_mem); | ||
217 | |||
218 | void free_iova_mem(struct iova *iova) | ||
219 | { | ||
220 | kmem_cache_free(iova_cache, iova); | ||
221 | } | ||
222 | EXPORT_SYMBOL(free_iova_mem); | ||
223 | |||
224 | int iova_cache_get(void) | ||
225 | { | ||
226 | mutex_lock(&iova_cache_mutex); | ||
227 | if (!iova_cache_users) { | ||
228 | iova_cache = kmem_cache_create( | ||
229 | "iommu_iova", sizeof(struct iova), 0, | ||
230 | SLAB_HWCACHE_ALIGN, NULL); | ||
231 | if (!iova_cache) { | ||
232 | mutex_unlock(&iova_cache_mutex); | ||
233 | printk(KERN_ERR "Couldn't create iova cache\n"); | ||
234 | return -ENOMEM; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | iova_cache_users++; | ||
239 | mutex_unlock(&iova_cache_mutex); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | EXPORT_SYMBOL_GPL(iova_cache_get); | ||
244 | |||
245 | void iova_cache_put(void) | ||
246 | { | ||
247 | mutex_lock(&iova_cache_mutex); | ||
248 | if (WARN_ON(!iova_cache_users)) { | ||
249 | mutex_unlock(&iova_cache_mutex); | ||
250 | return; | ||
251 | } | ||
252 | iova_cache_users--; | ||
253 | if (!iova_cache_users) | ||
254 | kmem_cache_destroy(iova_cache); | ||
255 | mutex_unlock(&iova_cache_mutex); | ||
256 | } | ||
257 | EXPORT_SYMBOL_GPL(iova_cache_put); | ||
258 | |||
245 | /** | 259 | /** |
246 | * alloc_iova - allocates an iova | 260 | * alloc_iova - allocates an iova |
247 | * @iovad: - iova domain in question | 261 | * @iovad: - iova domain in question |
@@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, | |||
265 | if (!new_iova) | 279 | if (!new_iova) |
266 | return NULL; | 280 | return NULL; |
267 | 281 | ||
268 | /* If size aligned is set then round the size to | ||
269 | * to next power of two. | ||
270 | */ | ||
271 | if (size_aligned) | ||
272 | size = __roundup_pow_of_two(size); | ||
273 | |||
274 | ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, | 282 | ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, |
275 | new_iova, size_aligned); | 283 | new_iova, size_aligned); |
276 | 284 | ||
@@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, | |||
281 | 289 | ||
282 | return new_iova; | 290 | return new_iova; |
283 | } | 291 | } |
292 | EXPORT_SYMBOL_GPL(alloc_iova); | ||
284 | 293 | ||
285 | /** | 294 | /** |
286 | * find_iova - find's an iova for a given pfn | 295 | * find_iova - find's an iova for a given pfn |
@@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) | |||
321 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 330 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
322 | return NULL; | 331 | return NULL; |
323 | } | 332 | } |
333 | EXPORT_SYMBOL_GPL(find_iova); | ||
324 | 334 | ||
325 | /** | 335 | /** |
326 | * __free_iova - frees the given iova | 336 | * __free_iova - frees the given iova |
@@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova) | |||
339 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 349 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
340 | free_iova_mem(iova); | 350 | free_iova_mem(iova); |
341 | } | 351 | } |
352 | EXPORT_SYMBOL_GPL(__free_iova); | ||
342 | 353 | ||
343 | /** | 354 | /** |
344 | * free_iova - finds and frees the iova for a given pfn | 355 | * free_iova - finds and frees the iova for a given pfn |
@@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn) | |||
356 | __free_iova(iovad, iova); | 367 | __free_iova(iovad, iova); |
357 | 368 | ||
358 | } | 369 | } |
370 | EXPORT_SYMBOL_GPL(free_iova); | ||
359 | 371 | ||
360 | /** | 372 | /** |
361 | * put_iova_domain - destroys the iova doamin | 373 | * put_iova_domain - destroys the iova doamin |
@@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad) | |||
378 | } | 390 | } |
379 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 391 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
380 | } | 392 | } |
393 | EXPORT_SYMBOL_GPL(put_iova_domain); | ||
381 | 394 | ||
382 | static int | 395 | static int |
383 | __is_range_overlap(struct rb_node *node, | 396 | __is_range_overlap(struct rb_node *node, |
@@ -467,6 +480,7 @@ finish: | |||
467 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 480 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
468 | return iova; | 481 | return iova; |
469 | } | 482 | } |
483 | EXPORT_SYMBOL_GPL(reserve_iova); | ||
470 | 484 | ||
471 | /** | 485 | /** |
472 | * copy_reserved_iova - copies the reserved between domains | 486 | * copy_reserved_iova - copies the reserved between domains |
@@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) | |||
493 | } | 507 | } |
494 | spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); | 508 | spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); |
495 | } | 509 | } |
510 | EXPORT_SYMBOL_GPL(copy_reserved_iova); | ||
496 | 511 | ||
497 | struct iova * | 512 | struct iova * |
498 | split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, | 513 | split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, |
@@ -534,3 +549,6 @@ error: | |||
534 | free_iova_mem(prev); | 549 | free_iova_mem(prev); |
535 | return NULL; | 550 | return NULL; |
536 | } | 551 | } |
552 | |||
553 | MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>"); | ||
554 | MODULE_LICENSE("GPL"); | ||
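With the iova slab cache now reference-counted behind iova_cache_get()/iova_cache_put() (and IOMMU_IOVA made tristate in the Kconfig hunk above), each user takes a reference at init and drops it at exit, as intel-iommu now does. Roughly, the pairing for a modular user looks like this sketch; the setup/teardown helpers and error handling here are illustrative only:

    /* Sketch: pairing iova_cache_get()/iova_cache_put() in a module. */
    static int __init my_iommu_init(void)
    {
        int ret = iova_cache_get();      /* take a reference on the cache */

        if (ret)
            return ret;

        ret = my_setup();                /* hypothetical driver setup */
        if (ret)
            iova_cache_put();            /* drop the reference on failure */
        return ret;
    }

    static void __exit my_iommu_exit(void)
    {
        my_teardown();                   /* hypothetical */
        iova_cache_put();
    }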
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c index e9c6f2a5b52d..cd7d3bc78e34 100644 --- a/drivers/irqchip/exynos-combiner.c +++ b/drivers/irqchip/exynos-combiner.c | |||
@@ -65,12 +65,10 @@ static void combiner_unmask_irq(struct irq_data *data) | |||
65 | __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET); | 65 | __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET); |
66 | } | 66 | } |
67 | 67 | ||
68 | static void combiner_handle_cascade_irq(unsigned int __irq, | 68 | static void combiner_handle_cascade_irq(struct irq_desc *desc) |
69 | struct irq_desc *desc) | ||
70 | { | 69 | { |
71 | struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc); | 70 | struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc); |
72 | struct irq_chip *chip = irq_desc_get_chip(desc); | 71 | struct irq_chip *chip = irq_desc_get_chip(desc); |
73 | unsigned int irq = irq_desc_get_irq(desc); | ||
74 | unsigned int cascade_irq, combiner_irq; | 72 | unsigned int cascade_irq, combiner_irq; |
75 | unsigned long status; | 73 | unsigned long status; |
76 | 74 | ||
@@ -88,7 +86,7 @@ static void combiner_handle_cascade_irq(unsigned int __irq, | |||
88 | cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq); | 86 | cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq); |
89 | 87 | ||
90 | if (unlikely(!cascade_irq)) | 88 | if (unlikely(!cascade_irq)) |
91 | handle_bad_irq(irq, desc); | 89 | handle_bad_irq(desc); |
92 | else | 90 | else |
93 | generic_handle_irq(cascade_irq); | 91 | generic_handle_irq(cascade_irq); |
94 | 92 | ||
@@ -165,7 +163,7 @@ static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq, | |||
165 | 163 | ||
166 | irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq); | 164 | irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq); |
167 | irq_set_chip_data(irq, &combiner_data[hw >> 3]); | 165 | irq_set_chip_data(irq, &combiner_data[hw >> 3]); |
168 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 166 | irq_set_probe(irq); |
169 | 167 | ||
170 | return 0; | 168 | return 0; |
171 | } | 169 | } |
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 39b72da0c143..655cb967a1f2 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c | |||
@@ -200,7 +200,6 @@ static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, | |||
200 | { | 200 | { |
201 | irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip, | 201 | irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip, |
202 | handle_simple_irq); | 202 | handle_simple_irq); |
203 | set_irq_flags(virq, IRQF_VALID); | ||
204 | 203 | ||
205 | return 0; | 204 | return 0; |
206 | } | 205 | } |
@@ -317,7 +316,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, | |||
317 | irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, | 316 | irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, |
318 | handle_level_irq); | 317 | handle_level_irq); |
319 | } | 318 | } |
320 | set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); | 319 | irq_set_probe(virq); |
321 | 320 | ||
322 | return 0; | 321 | return 0; |
323 | } | 322 | } |
@@ -447,8 +446,7 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained) | |||
447 | static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {} | 446 | static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {} |
448 | #endif | 447 | #endif |
449 | 448 | ||
450 | static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq, | 449 | static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc) |
451 | struct irq_desc *desc) | ||
452 | { | 450 | { |
453 | struct irq_chip *chip = irq_desc_get_chip(desc); | 451 | struct irq_chip *chip = irq_desc_get_chip(desc); |
454 | unsigned long irqmap, irqn, irqsrc, cpuid; | 452 | unsigned long irqmap, irqn, irqsrc, cpuid; |
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 9da9942ac83c..f6d680485bee 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c | |||
@@ -88,28 +88,36 @@ static void aic5_mask(struct irq_data *d) | |||
88 | { | 88 | { |
89 | struct irq_domain *domain = d->domain; | 89 | struct irq_domain *domain = d->domain; |
90 | struct irq_domain_chip_generic *dgc = domain->gc; | 90 | struct irq_domain_chip_generic *dgc = domain->gc; |
91 | struct irq_chip_generic *gc = dgc->gc[0]; | 91 | struct irq_chip_generic *bgc = dgc->gc[0]; |
92 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
92 | 93 | ||
93 | /* Disable interrupt on AIC5 */ | 94 | /* |
94 | irq_gc_lock(gc); | 95 | * Disable interrupt on AIC5. We always take the lock of the |
96 | * first irq chip as all chips share the same registers. | ||
97 | */ | ||
98 | irq_gc_lock(bgc); | ||
95 | irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); | 99 | irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); |
96 | irq_reg_writel(gc, 1, AT91_AIC5_IDCR); | 100 | irq_reg_writel(gc, 1, AT91_AIC5_IDCR); |
97 | gc->mask_cache &= ~d->mask; | 101 | gc->mask_cache &= ~d->mask; |
98 | irq_gc_unlock(gc); | 102 | irq_gc_unlock(bgc); |
99 | } | 103 | } |
100 | 104 | ||
101 | static void aic5_unmask(struct irq_data *d) | 105 | static void aic5_unmask(struct irq_data *d) |
102 | { | 106 | { |
103 | struct irq_domain *domain = d->domain; | 107 | struct irq_domain *domain = d->domain; |
104 | struct irq_domain_chip_generic *dgc = domain->gc; | 108 | struct irq_domain_chip_generic *dgc = domain->gc; |
105 | struct irq_chip_generic *gc = dgc->gc[0]; | 109 | struct irq_chip_generic *bgc = dgc->gc[0]; |
110 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
106 | 111 | ||
107 | /* Enable interrupt on AIC5 */ | 112 | /* |
108 | irq_gc_lock(gc); | 113 | * Enable interrupt on AIC5. We always take the lock of the |
114 | * first irq chip as all chips share the same registers. | ||
115 | */ | ||
116 | irq_gc_lock(bgc); | ||
109 | irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); | 117 | irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); |
110 | irq_reg_writel(gc, 1, AT91_AIC5_IECR); | 118 | irq_reg_writel(gc, 1, AT91_AIC5_IECR); |
111 | gc->mask_cache |= d->mask; | 119 | gc->mask_cache |= d->mask; |
112 | irq_gc_unlock(gc); | 120 | irq_gc_unlock(bgc); |
113 | } | 121 | } |
114 | 122 | ||
115 | static int aic5_retrigger(struct irq_data *d) | 123 | static int aic5_retrigger(struct irq_data *d) |
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index ed4ca9deca70..bf9cc5f2e839 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c | |||
@@ -96,7 +96,7 @@ struct armctrl_ic { | |||
96 | static struct armctrl_ic intc __read_mostly; | 96 | static struct armctrl_ic intc __read_mostly; |
97 | static void __exception_irq_entry bcm2835_handle_irq( | 97 | static void __exception_irq_entry bcm2835_handle_irq( |
98 | struct pt_regs *regs); | 98 | struct pt_regs *regs); |
99 | static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc); | 99 | static void bcm2836_chained_handle_irq(struct irq_desc *desc); |
100 | 100 | ||
101 | static void armctrl_mask_irq(struct irq_data *d) | 101 | static void armctrl_mask_irq(struct irq_data *d) |
102 | { | 102 | { |
@@ -166,7 +166,7 @@ static int __init armctrl_of_init(struct device_node *node, | |||
166 | BUG_ON(irq <= 0); | 166 | BUG_ON(irq <= 0); |
167 | irq_set_chip_and_handler(irq, &armctrl_chip, | 167 | irq_set_chip_and_handler(irq, &armctrl_chip, |
168 | handle_level_irq); | 168 | handle_level_irq); |
169 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 169 | irq_set_probe(irq); |
170 | } | 170 | } |
171 | } | 171 | } |
172 | 172 | ||
@@ -245,7 +245,7 @@ static void __exception_irq_entry bcm2835_handle_irq( | |||
245 | handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); | 245 | handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); |
246 | } | 246 | } |
247 | 247 | ||
248 | static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc) | 248 | static void bcm2836_chained_handle_irq(struct irq_desc *desc) |
249 | { | 249 | { |
250 | u32 hwirq; | 250 | u32 hwirq; |
251 | 251 | ||
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 409bdc6366c2..0fea985ef1dc 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c | |||
@@ -115,7 +115,7 @@ static inline void l1_writel(u32 val, void __iomem *reg) | |||
115 | writel(val, reg); | 115 | writel(val, reg); |
116 | } | 116 | } |
117 | 117 | ||
118 | static void bcm7038_l1_irq_handle(unsigned int irq, struct irq_desc *desc) | 118 | static void bcm7038_l1_irq_handle(struct irq_desc *desc) |
119 | { | 119 | { |
120 | struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc); | 120 | struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc); |
121 | struct bcm7038_l1_cpu *cpu; | 121 | struct bcm7038_l1_cpu *cpu; |
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index d3f976913a6f..61b18ab33ad9 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c | |||
@@ -56,7 +56,7 @@ struct bcm7120_l2_intc_data { | |||
56 | const __be32 *map_mask_prop; | 56 | const __be32 *map_mask_prop; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) | 59 | static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc) |
60 | { | 60 | { |
61 | struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc); | 61 | struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc); |
62 | struct bcm7120_l2_intc_data *b = data->b; | 62 | struct bcm7120_l2_intc_data *b = data->b; |
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index aedda06191eb..65cd341f331a 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c | |||
@@ -49,13 +49,12 @@ struct brcmstb_l2_intc_data { | |||
49 | u32 saved_mask; /* for suspend/resume */ | 49 | u32 saved_mask; /* for suspend/resume */ |
50 | }; | 50 | }; |
51 | 51 | ||
52 | static void brcmstb_l2_intc_irq_handle(unsigned int __irq, | 52 | static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) |
53 | struct irq_desc *desc) | ||
54 | { | 53 | { |
55 | struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); | 54 | struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); |
56 | struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0); | 55 | struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0); |
57 | struct irq_chip *chip = irq_desc_get_chip(desc); | 56 | struct irq_chip *chip = irq_desc_get_chip(desc); |
58 | unsigned int irq = irq_desc_get_irq(desc); | 57 | unsigned int irq; |
59 | u32 status; | 58 | u32 status; |
60 | 59 | ||
61 | chained_irq_enter(chip, desc); | 60 | chained_irq_enter(chip, desc); |
@@ -65,7 +64,7 @@ static void brcmstb_l2_intc_irq_handle(unsigned int __irq, | |||
65 | 64 | ||
66 | if (status == 0) { | 65 | if (status == 0) { |
67 | raw_spin_lock(&desc->lock); | 66 | raw_spin_lock(&desc->lock); |
68 | handle_bad_irq(irq, desc); | 67 | handle_bad_irq(desc); |
69 | raw_spin_unlock(&desc->lock); | 68 | raw_spin_unlock(&desc->lock); |
70 | goto out; | 69 | goto out; |
71 | } | 70 | } |
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c index 2dd929eed9e0..eb5eb0cd414d 100644 --- a/drivers/irqchip/irq-clps711x.c +++ b/drivers/irqchip/irq-clps711x.c | |||
@@ -132,14 +132,14 @@ static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq, | |||
132 | irq_hw_number_t hw) | 132 | irq_hw_number_t hw) |
133 | { | 133 | { |
134 | irq_flow_handler_t handler = handle_level_irq; | 134 | irq_flow_handler_t handler = handle_level_irq; |
135 | unsigned int flags = IRQF_VALID | IRQF_PROBE; | 135 | unsigned int flags = 0; |
136 | 136 | ||
137 | if (!clps711x_irqs[hw].flags) | 137 | if (!clps711x_irqs[hw].flags) |
138 | return 0; | 138 | return 0; |
139 | 139 | ||
140 | if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) { | 140 | if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) { |
141 | handler = handle_bad_irq; | 141 | handler = handle_bad_irq; |
142 | flags |= IRQF_NOAUTOEN; | 142 | flags |= IRQ_NOAUTOEN; |
143 | } else if (clps711x_irqs[hw].eoi) { | 143 | } else if (clps711x_irqs[hw].eoi) { |
144 | handler = handle_fasteoi_irq; | 144 | handler = handle_fasteoi_irq; |
145 | } | 145 | } |
@@ -149,7 +149,7 @@ static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq, | |||
149 | writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi); | 149 | writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi); |
150 | 150 | ||
151 | irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler); | 151 | irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler); |
152 | set_irq_flags(virq, flags); | 152 | irq_modify_status(virq, IRQ_NOPROBE, flags); |
153 | 153 | ||
154 | return 0; | 154 | return 0; |
155 | } | 155 | } |
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c index efd95d9955e7..052f266364c0 100644 --- a/drivers/irqchip/irq-dw-apb-ictl.c +++ b/drivers/irqchip/irq-dw-apb-ictl.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #define APB_INT_FINALSTATUS_H 0x34 | 26 | #define APB_INT_FINALSTATUS_H 0x34 |
27 | #define APB_INT_BASE_OFFSET 0x04 | 27 | #define APB_INT_BASE_OFFSET 0x04 |
28 | 28 | ||
29 | static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc) | 29 | static void dw_apb_ictl_handler(struct irq_desc *desc) |
30 | { | 30 | { |
31 | struct irq_domain *d = irq_desc_get_handler_data(desc); | 31 | struct irq_domain *d = irq_desc_get_handler_data(desc); |
32 | struct irq_chip *chip = irq_desc_get_chip(desc); | 32 | struct irq_chip *chip = irq_desc_get_chip(desc); |
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index db04fc1f56b2..12985daa66ab 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c | |||
@@ -95,8 +95,8 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
95 | struct v2m_data *v2m = irq_data_get_irq_chip_data(data); | 95 | struct v2m_data *v2m = irq_data_get_irq_chip_data(data); |
96 | phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS; | 96 | phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS; |
97 | 97 | ||
98 | msg->address_hi = (u32) (addr >> 32); | 98 | msg->address_hi = upper_32_bits(addr); |
99 | msg->address_lo = (u32) (addr); | 99 | msg->address_lo = lower_32_bits(addr); |
100 | msg->data = data->hwirq; | 100 | msg->data = data->hwirq; |
101 | } | 101 | } |
102 | 102 | ||
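upper_32_bits()/lower_32_bits() split the doorbell address without relying on a ">> 32" that is questionable when phys_addr_t is only 32 bits wide. The resulting MSI message is simply (doorbell_base is illustrative shorthand for v2m->res.start):

	phys_addr_t addr = doorbell_base + V2M_MSI_SETSPI_NS;

	msg->address_hi = upper_32_bits(addr);	/* 0 on a 32-bit phys_addr_t */
	msg->address_lo = lower_32_bits(addr);
	msg->data       = data->hwirq;		/* SPI number the write will trigger */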
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index cf351c637464..a7c8c9ffbafd 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c | |||
@@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) | |||
62 | 62 | ||
63 | dev_alias->dev_id = alias; | 63 | dev_alias->dev_id = alias; |
64 | if (pdev != dev_alias->pdev) | 64 | if (pdev != dev_alias->pdev) |
65 | dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); | 65 | dev_alias->count += its_pci_msi_vec_count(pdev); |
66 | 66 | ||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 26b55c53755f..25ceae9f7348 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) | |||
719 | out: | 719 | out: |
720 | spin_unlock(&lpi_lock); | 720 | spin_unlock(&lpi_lock); |
721 | 721 | ||
722 | if (!bitmap) | ||
723 | *base = *nr_ids = 0; | ||
724 | |||
722 | return bitmap; | 725 | return bitmap; |
723 | } | 726 | } |
724 | 727 | ||
@@ -898,8 +901,10 @@ retry_baser: | |||
898 | * non-cacheable as well. | 901 | * non-cacheable as well. |
899 | */ | 902 | */ |
900 | shr = tmp & GITS_BASER_SHAREABILITY_MASK; | 903 | shr = tmp & GITS_BASER_SHAREABILITY_MASK; |
901 | if (!shr) | 904 | if (!shr) { |
902 | cache = GITS_BASER_nC; | 905 | cache = GITS_BASER_nC; |
906 | __flush_dcache_area(base, alloc_size); | ||
907 | } | ||
903 | goto retry_baser; | 908 | goto retry_baser; |
904 | } | 909 | } |
905 | 910 | ||
@@ -1140,6 +1145,8 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1140 | return NULL; | 1145 | return NULL; |
1141 | } | 1146 | } |
1142 | 1147 | ||
1148 | __flush_dcache_area(itt, sz); | ||
1149 | |||
1143 | dev->its = its; | 1150 | dev->its = its; |
1144 | dev->itt = itt; | 1151 | dev->itt = itt; |
1145 | dev->nr_ites = nr_ites; | 1152 | dev->nr_ites = nr_ites; |
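Two defensive patterns appear in the ITS hunks: on allocation failure the output parameters are zeroed so callers cannot act on stale values, and freshly built tables (the BASER-backed tables and the per-device ITT) are cleaned from the data cache before the ITS, which may access them non-cacheably, reads them. A rough sketch of the first pattern, with names shortened:

	static unsigned long *lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
	{
		unsigned long *bitmap = NULL;

		spin_lock(&lpi_lock);
		/* ... look for a free chunk; on success set bitmap, *base and *nr_ids ... */
		spin_unlock(&lpi_lock);

		if (!bitmap)			/* failure: make the outputs harmless */
			*base = *nr_ids = 0;

		return bitmap;
	}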
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 7deed6ef54c2..36ecfc870e5a 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -70,11 +70,6 @@ static inline int gic_irq_in_rdist(struct irq_data *d) | |||
70 | return gic_irq(d) < 32; | 70 | return gic_irq(d) < 32; |
71 | } | 71 | } |
72 | 72 | ||
73 | static inline bool forwarded_irq(struct irq_data *d) | ||
74 | { | ||
75 | return d->handler_data != NULL; | ||
76 | } | ||
77 | |||
78 | static inline void __iomem *gic_dist_base(struct irq_data *d) | 73 | static inline void __iomem *gic_dist_base(struct irq_data *d) |
79 | { | 74 | { |
80 | if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ | 75 | if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ |
@@ -249,7 +244,7 @@ static void gic_eoimode1_mask_irq(struct irq_data *d) | |||
249 | * disabled/masked will not get "stuck", because there is | 244 | * disabled/masked will not get "stuck", because there is |
250 | * noone to deactivate it (guest is being terminated). | 245 | * noone to deactivate it (guest is being terminated). |
251 | */ | 246 | */ |
252 | if (forwarded_irq(d)) | 247 | if (irqd_is_forwarded_to_vcpu(d)) |
253 | gic_poke_irq(d, GICD_ICACTIVER); | 248 | gic_poke_irq(d, GICD_ICACTIVER); |
254 | } | 249 | } |
255 | 250 | ||
@@ -324,7 +319,7 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d) | |||
324 | * No need to deactivate an LPI, or an interrupt that | 319 | * No need to deactivate an LPI, or an interrupt that |
325 | * is getting forwarded to a vcpu. | 320 | * is getting forwarded to a vcpu. |
326 | */ | 321 | */ |
327 | if (gic_irq(d) >= 8192 || forwarded_irq(d)) | 322 | if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) |
328 | return; | 323 | return; |
329 | gic_write_dir(gic_irq(d)); | 324 | gic_write_dir(gic_irq(d)); |
330 | } | 325 | } |
@@ -357,7 +352,10 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
357 | 352 | ||
358 | static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) | 353 | static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) |
359 | { | 354 | { |
360 | d->handler_data = vcpu; | 355 | if (vcpu) |
356 | irqd_set_forwarded_to_vcpu(d); | ||
357 | else | ||
358 | irqd_clr_forwarded_to_vcpu(d); | ||
361 | return 0; | 359 | return 0; |
362 | } | 360 | } |
363 | 361 | ||
@@ -754,13 +752,13 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, | |||
754 | irq_set_percpu_devid(irq); | 752 | irq_set_percpu_devid(irq); |
755 | irq_domain_set_info(d, irq, hw, chip, d->host_data, | 753 | irq_domain_set_info(d, irq, hw, chip, d->host_data, |
756 | handle_percpu_devid_irq, NULL, NULL); | 754 | handle_percpu_devid_irq, NULL, NULL); |
757 | set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); | 755 | irq_set_status_flags(irq, IRQ_NOAUTOEN); |
758 | } | 756 | } |
759 | /* SPIs */ | 757 | /* SPIs */ |
760 | if (hw >= 32 && hw < gic_data.irq_nr) { | 758 | if (hw >= 32 && hw < gic_data.irq_nr) { |
761 | irq_domain_set_info(d, irq, hw, chip, d->host_data, | 759 | irq_domain_set_info(d, irq, hw, chip, d->host_data, |
762 | handle_fasteoi_irq, NULL, NULL); | 760 | handle_fasteoi_irq, NULL, NULL); |
763 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 761 | irq_set_probe(irq); |
764 | } | 762 | } |
765 | /* LPIs */ | 763 | /* LPIs */ |
766 | if (hw >= 8192 && hw < GIC_ID_NR) { | 764 | if (hw >= 8192 && hw < GIC_ID_NR) { |
@@ -768,7 +766,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, | |||
768 | return -EPERM; | 766 | return -EPERM; |
769 | irq_domain_set_info(d, irq, hw, chip, d->host_data, | 767 | irq_domain_set_info(d, irq, hw, chip, d->host_data, |
770 | handle_fasteoi_irq, NULL, NULL); | 768 | handle_fasteoi_irq, NULL, NULL); |
771 | set_irq_flags(irq, IRQF_VALID); | ||
772 | } | 769 | } |
773 | 770 | ||
774 | return 0; | 771 | return 0; |
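Rather than overloading irq_data->handler_data as a "forwarded to a vCPU" marker, the driver now uses the dedicated irqd status bit, so the EOImode-1 mask and EOI paths only need irqd_is_forwarded_to_vcpu(). The two halves of that contract, as converted above:

	static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
	{
		if (vcpu)
			irqd_set_forwarded_to_vcpu(d);
		else
			irqd_clr_forwarded_to_vcpu(d);
		return 0;
	}

	static void gic_eoimode1_eoi_irq(struct irq_data *d)
	{
		/* LPIs and forwarded interrupts are deactivated elsewhere (or by the guest) */
		if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
			return;
		gic_write_dir(gic_irq(d));
	}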
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index e6b7ed537952..982c09c2d791 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -145,29 +145,10 @@ static inline bool cascading_gic_irq(struct irq_data *d) | |||
145 | void *data = irq_data_get_irq_handler_data(d); | 145 | void *data = irq_data_get_irq_handler_data(d); |
146 | 146 | ||
147 | /* | 147 | /* |
148 | * If handler_data pointing to one of the secondary GICs, then | 148 | * If handler_data is set, this is a cascading interrupt, and |
149 | * this is a cascading interrupt, and it cannot possibly be | 149 | * it cannot possibly be forwarded. |
150 | * forwarded. | ||
151 | */ | 150 | */ |
152 | if (data >= (void *)(gic_data + 1) && | 151 | return data != NULL; |
153 | data < (void *)(gic_data + MAX_GIC_NR)) | ||
154 | return true; | ||
155 | |||
156 | return false; | ||
157 | } | ||
158 | |||
159 | static inline bool forwarded_irq(struct irq_data *d) | ||
160 | { | ||
161 | /* | ||
162 | * A forwarded interrupt: | ||
163 | * - is on the primary GIC | ||
164 | * - has its handler_data set to a value | ||
165 | * - that isn't a secondary GIC | ||
166 | */ | ||
167 | if (d->handler_data && !cascading_gic_irq(d)) | ||
168 | return true; | ||
169 | |||
170 | return false; | ||
171 | } | 152 | } |
172 | 153 | ||
173 | /* | 154 | /* |
@@ -201,7 +182,7 @@ static void gic_eoimode1_mask_irq(struct irq_data *d) | |||
201 | * disabled/masked will not get "stuck", because there is | 182 | * disabled/masked will not get "stuck", because there is |
202 | * no one to deactivate it (guest is being terminated). | 183 | * no one to deactivate it (guest is being terminated). |
203 | */ | 184 | */ |
204 | if (forwarded_irq(d)) | 185 | if (irqd_is_forwarded_to_vcpu(d)) |
205 | gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR); | 186 | gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR); |
206 | } | 187 | } |
207 | 188 | ||
@@ -218,7 +199,7 @@ static void gic_eoi_irq(struct irq_data *d) | |||
218 | static void gic_eoimode1_eoi_irq(struct irq_data *d) | 199 | static void gic_eoimode1_eoi_irq(struct irq_data *d) |
219 | { | 200 | { |
220 | /* Do not deactivate an IRQ forwarded to a vcpu. */ | 201 | /* Do not deactivate an IRQ forwarded to a vcpu. */ |
221 | if (forwarded_irq(d)) | 202 | if (irqd_is_forwarded_to_vcpu(d)) |
222 | return; | 203 | return; |
223 | 204 | ||
224 | writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE); | 205 | writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE); |
@@ -296,7 +277,10 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) | |||
296 | if (cascading_gic_irq(d)) | 277 | if (cascading_gic_irq(d)) |
297 | return -EINVAL; | 278 | return -EINVAL; |
298 | 279 | ||
299 | d->handler_data = vcpu; | 280 | if (vcpu) |
281 | irqd_set_forwarded_to_vcpu(d); | ||
282 | else | ||
283 | irqd_clr_forwarded_to_vcpu(d); | ||
300 | return 0; | 284 | return 0; |
301 | } | 285 | } |
302 | 286 | ||
@@ -357,7 +341,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) | |||
357 | } while (1); | 341 | } while (1); |
358 | } | 342 | } |
359 | 343 | ||
360 | static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | 344 | static void gic_handle_cascade_irq(struct irq_desc *desc) |
361 | { | 345 | { |
362 | struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc); | 346 | struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc); |
363 | struct irq_chip *chip = irq_desc_get_chip(desc); | 347 | struct irq_chip *chip = irq_desc_get_chip(desc); |
@@ -376,7 +360,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | |||
376 | 360 | ||
377 | cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); | 361 | cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); |
378 | if (unlikely(gic_irq < 32 || gic_irq > 1020)) | 362 | if (unlikely(gic_irq < 32 || gic_irq > 1020)) |
379 | handle_bad_irq(cascade_irq, desc); | 363 | handle_bad_irq(desc); |
380 | else | 364 | else |
381 | generic_handle_irq(cascade_irq); | 365 | generic_handle_irq(cascade_irq); |
382 | 366 | ||
@@ -906,11 +890,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, | |||
906 | irq_set_percpu_devid(irq); | 890 | irq_set_percpu_devid(irq); |
907 | irq_domain_set_info(d, irq, hw, chip, d->host_data, | 891 | irq_domain_set_info(d, irq, hw, chip, d->host_data, |
908 | handle_percpu_devid_irq, NULL, NULL); | 892 | handle_percpu_devid_irq, NULL, NULL); |
909 | set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); | 893 | irq_set_status_flags(irq, IRQ_NOAUTOEN); |
910 | } else { | 894 | } else { |
911 | irq_domain_set_info(d, irq, hw, chip, d->host_data, | 895 | irq_domain_set_info(d, irq, hw, chip, d->host_data, |
912 | handle_fasteoi_irq, NULL, NULL); | 896 | handle_fasteoi_irq, NULL, NULL); |
913 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 897 | irq_set_probe(irq); |
914 | } | 898 | } |
915 | return 0; | 899 | return 0; |
916 | } | 900 | } |
@@ -1119,12 +1103,49 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, | |||
1119 | #ifdef CONFIG_OF | 1103 | #ifdef CONFIG_OF |
1120 | static int gic_cnt __initdata; | 1104 | static int gic_cnt __initdata; |
1121 | 1105 | ||
1106 | static bool gic_check_eoimode(struct device_node *node, void __iomem **base) | ||
1107 | { | ||
1108 | struct resource cpuif_res; | ||
1109 | |||
1110 | of_address_to_resource(node, 1, &cpuif_res); | ||
1111 | |||
1112 | if (!is_hyp_mode_available()) | ||
1113 | return false; | ||
1114 | if (resource_size(&cpuif_res) < SZ_8K) | ||
1115 | return false; | ||
1116 | if (resource_size(&cpuif_res) == SZ_128K) { | ||
1117 | u32 val_low, val_high; | ||
1118 | |||
1119 | /* | ||
1120 | * Verify that we have the first 4kB of a GIC400 | ||
1121 | * aliased over the first 64kB by checking the | ||
1122 | * GICC_IIDR register on both ends. | ||
1123 | */ | ||
1124 | val_low = readl_relaxed(*base + GIC_CPU_IDENT); | ||
1125 | val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000); | ||
1126 | if ((val_low & 0xffff0fff) != 0x0202043B || | ||
1127 | val_low != val_high) | ||
1128 | return false; | ||
1129 | |||
1130 | /* | ||
1131 | * Move the base up by 60kB, so that we have an 8kB | ||
1132 | * contiguous region, which allows us to use GICC_DIR | ||
1133 | * at its normal offset. Please pass me that bucket. | ||
1134 | */ | ||
1135 | *base += 0xf000; | ||
1136 | cpuif_res.start += 0xf000; | ||
1137 | pr_warn("GIC: Adjusting CPU interface base to %pa", | ||
1138 | &cpuif_res.start); | ||
1139 | } | ||
1140 | |||
1141 | return true; | ||
1142 | } | ||
1143 | |||
1122 | static int __init | 1144 | static int __init |
1123 | gic_of_init(struct device_node *node, struct device_node *parent) | 1145 | gic_of_init(struct device_node *node, struct device_node *parent) |
1124 | { | 1146 | { |
1125 | void __iomem *cpu_base; | 1147 | void __iomem *cpu_base; |
1126 | void __iomem *dist_base; | 1148 | void __iomem *dist_base; |
1127 | struct resource cpu_res; | ||
1128 | u32 percpu_offset; | 1149 | u32 percpu_offset; |
1129 | int irq; | 1150 | int irq; |
1130 | 1151 | ||
@@ -1137,14 +1158,11 @@ gic_of_init(struct device_node *node, struct device_node *parent) | |||
1137 | cpu_base = of_iomap(node, 1); | 1158 | cpu_base = of_iomap(node, 1); |
1138 | WARN(!cpu_base, "unable to map gic cpu registers\n"); | 1159 | WARN(!cpu_base, "unable to map gic cpu registers\n"); |
1139 | 1160 | ||
1140 | of_address_to_resource(node, 1, &cpu_res); | ||
1141 | |||
1142 | /* | 1161 | /* |
1143 | * Disable split EOI/Deactivate if either HYP is not available | 1162 | * Disable split EOI/Deactivate if either HYP is not available |
1144 | * or the CPU interface is too small. | 1163 | * or the CPU interface is too small. |
1145 | */ | 1164 | */ |
1146 | if (gic_cnt == 0 && (!is_hyp_mode_available() || | 1165 | if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base)) |
1147 | resource_size(&cpu_res) < SZ_8K)) | ||
1148 | static_key_slow_dec(&supports_deactivate); | 1166 | static_key_slow_dec(&supports_deactivate); |
1149 | 1167 | ||
1150 | if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) | 1168 | if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) |
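The new gic_check_eoimode() gathers every condition for using split EOI/Deactivate in one place: HYP mode must be available, the CPU interface must span at least 8kB, and the common 128kB GIC400 layout (first 4kB aliased over the first 64kB) is detected by comparing GICC_IIDR at offset 0 and at 0xf000, after which the base is moved up by 60kB so GICC_DIR sits at its normal offset inside an 8kB contiguous window. The core of the alias check, condensed from the hunk:

	val_low  = readl_relaxed(*base + GIC_CPU_IDENT);
	val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
	if ((val_low & 0xffff0fff) != 0x0202043B || val_low != val_high)
		return false;			/* not the aliased GIC400 layout */

	*base += 0xf000;			/* 60kB up: GICC_DIR reachable at its usual offset */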
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index a0128c7c98dd..8f3ca8f3a62b 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c | |||
@@ -307,11 +307,11 @@ static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq, | |||
307 | irq_set_percpu_devid(irq); | 307 | irq_set_percpu_devid(irq); |
308 | irq_set_chip_and_handler(irq, &hip04_irq_chip, | 308 | irq_set_chip_and_handler(irq, &hip04_irq_chip, |
309 | handle_percpu_devid_irq); | 309 | handle_percpu_devid_irq); |
310 | set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); | 310 | irq_set_status_flags(irq, IRQ_NOAUTOEN); |
311 | } else { | 311 | } else { |
312 | irq_set_chip_and_handler(irq, &hip04_irq_chip, | 312 | irq_set_chip_and_handler(irq, &hip04_irq_chip, |
313 | handle_fasteoi_irq); | 313 | handle_fasteoi_irq); |
314 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 314 | irq_set_probe(irq); |
315 | } | 315 | } |
316 | irq_set_chip_data(irq, d->host_data); | 316 | irq_set_chip_data(irq, d->host_data); |
317 | return 0; | 317 | return 0; |
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index 4836102ba312..e484fd255321 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c | |||
@@ -352,7 +352,7 @@ void __init init_i8259_irqs(void) | |||
352 | __init_i8259_irqs(NULL); | 352 | __init_i8259_irqs(NULL); |
353 | } | 353 | } |
354 | 354 | ||
355 | static void i8259_irq_dispatch(unsigned int __irq, struct irq_desc *desc) | 355 | static void i8259_irq_dispatch(struct irq_desc *desc) |
356 | { | 356 | { |
357 | struct irq_domain *domain = irq_desc_get_handler_data(desc); | 357 | struct irq_domain *domain = irq_desc_get_handler_data(desc); |
358 | int hwirq = i8259_irq(); | 358 | int hwirq = i8259_irq(); |
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c index 841604b81004..c02d29c9dc05 100644 --- a/drivers/irqchip/irq-imgpdc.c +++ b/drivers/irqchip/irq-imgpdc.c | |||
@@ -218,7 +218,7 @@ static int pdc_irq_set_wake(struct irq_data *data, unsigned int on) | |||
218 | return 0; | 218 | return 0; |
219 | } | 219 | } |
220 | 220 | ||
221 | static void pdc_intc_perip_isr(unsigned int __irq, struct irq_desc *desc) | 221 | static void pdc_intc_perip_isr(struct irq_desc *desc) |
222 | { | 222 | { |
223 | unsigned int irq = irq_desc_get_irq(desc); | 223 | unsigned int irq = irq_desc_get_irq(desc); |
224 | struct pdc_intc_priv *priv; | 224 | struct pdc_intc_priv *priv; |
@@ -240,7 +240,7 @@ found: | |||
240 | generic_handle_irq(irq_no); | 240 | generic_handle_irq(irq_no); |
241 | } | 241 | } |
242 | 242 | ||
243 | static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc) | 243 | static void pdc_intc_syswake_isr(struct irq_desc *desc) |
244 | { | 244 | { |
245 | struct pdc_intc_priv *priv; | 245 | struct pdc_intc_priv *priv; |
246 | unsigned int syswake, irq_no; | 246 | unsigned int syswake, irq_no; |
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index c1517267b5db..deb89d63a728 100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c | |||
@@ -83,7 +83,7 @@ static void keystone_irq_ack(struct irq_data *d) | |||
83 | /* nothing to do here */ | 83 | /* nothing to do here */ |
84 | } | 84 | } |
85 | 85 | ||
86 | static void keystone_irq_handler(unsigned __irq, struct irq_desc *desc) | 86 | static void keystone_irq_handler(struct irq_desc *desc) |
87 | { | 87 | { |
88 | unsigned int irq = irq_desc_get_irq(desc); | 88 | unsigned int irq = irq_desc_get_irq(desc); |
89 | struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); | 89 | struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); |
@@ -127,7 +127,7 @@ static int keystone_irq_map(struct irq_domain *h, unsigned int virq, | |||
127 | 127 | ||
128 | irq_set_chip_data(virq, kirq); | 128 | irq_set_chip_data(virq, kirq); |
129 | irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq); | 129 | irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq); |
130 | set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); | 130 | irq_set_probe(virq); |
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
133 | 133 | ||
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c index 5f4c52928d16..8c38b3d92e1c 100644 --- a/drivers/irqchip/irq-metag-ext.c +++ b/drivers/irqchip/irq-metag-ext.c | |||
@@ -446,7 +446,7 @@ static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type) | |||
446 | * Whilst using TR2 to detect external interrupts is a software convention it is | 446 | * Whilst using TR2 to detect external interrupts is a software convention it is |
447 | * (hopefully) unlikely to change. | 447 | * (hopefully) unlikely to change. |
448 | */ | 448 | */ |
449 | static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc) | 449 | static void meta_intc_irq_demux(struct irq_desc *desc) |
450 | { | 450 | { |
451 | struct meta_intc_priv *priv = &meta_intc_priv; | 451 | struct meta_intc_priv *priv = &meta_intc_priv; |
452 | irq_hw_number_t hw; | 452 | irq_hw_number_t hw; |
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c index 3d23ce3edb5c..a5f053bd2f44 100644 --- a/drivers/irqchip/irq-metag.c +++ b/drivers/irqchip/irq-metag.c | |||
@@ -220,7 +220,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data, | |||
220 | * occurred. It is this function's job to demux this irq and | 220 | * occurred. It is this function's job to demux this irq and |
221 | * figure out exactly which trigger needs servicing. | 221 | * figure out exactly which trigger needs servicing. |
222 | */ | 222 | */ |
223 | static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc) | 223 | static void metag_internal_irq_demux(struct irq_desc *desc) |
224 | { | 224 | { |
225 | struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc); | 225 | struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc); |
226 | irq_hw_number_t hw; | 226 | irq_hw_number_t hw; |
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 1764bcf8ee6b..aeaa061f0dbf 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
@@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained) | |||
320 | intrmask[i] = gic_read(intrmask_reg); | 320 | intrmask[i] = gic_read(intrmask_reg); |
321 | pending_reg += gic_reg_step; | 321 | pending_reg += gic_reg_step; |
322 | intrmask_reg += gic_reg_step; | 322 | intrmask_reg += gic_reg_step; |
323 | |||
324 | if (!config_enabled(CONFIG_64BIT) || mips_cm_is64) | ||
325 | continue; | ||
326 | |||
327 | pending[i] |= (u64)gic_read(pending_reg) << 32; | ||
328 | intrmask[i] |= (u64)gic_read(intrmask_reg) << 32; | ||
329 | pending_reg += gic_reg_step; | ||
330 | intrmask_reg += gic_reg_step; | ||
323 | } | 331 | } |
324 | 332 | ||
325 | bitmap_and(pending, pending, intrmask, gic_shared_intrs); | 333 | bitmap_and(pending, pending, intrmask, gic_shared_intrs); |
@@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, | |||
426 | spin_lock_irqsave(&gic_lock, flags); | 434 | spin_lock_irqsave(&gic_lock, flags); |
427 | 435 | ||
428 | /* Re-route this IRQ */ | 436 | /* Re-route this IRQ */ |
429 | gic_map_to_vpe(irq, cpumask_first(&tmp)); | 437 | gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); |
430 | 438 | ||
431 | /* Update the pcpu_masks */ | 439 | /* Update the pcpu_masks */ |
432 | for (i = 0; i < NR_CPUS; i++) | 440 | for (i = 0; i < NR_CPUS; i++) |
@@ -546,7 +554,7 @@ static void __gic_irq_dispatch(void) | |||
546 | gic_handle_shared_int(false); | 554 | gic_handle_shared_int(false); |
547 | } | 555 | } |
548 | 556 | ||
549 | static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc) | 557 | static void gic_irq_dispatch(struct irq_desc *desc) |
550 | { | 558 | { |
551 | gic_handle_local_int(true); | 559 | gic_handle_local_int(true); |
552 | gic_handle_shared_int(true); | 560 | gic_handle_shared_int(true); |
@@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu, | |||
599 | GIC_SHARED_TO_HWIRQ(intr)); | 607 | GIC_SHARED_TO_HWIRQ(intr)); |
600 | int i; | 608 | int i; |
601 | 609 | ||
602 | gic_map_to_vpe(intr, cpu); | 610 | gic_map_to_vpe(intr, mips_cm_vp_id(cpu)); |
603 | for (i = 0; i < NR_CPUS; i++) | 611 | for (i = 0; i < NR_CPUS; i++) |
604 | clear_bit(intr, pcpu_masks[i].pcpu_mask); | 612 | clear_bit(intr, pcpu_masks[i].pcpu_mask); |
605 | set_bit(intr, pcpu_masks[cpu].pcpu_mask); | 613 | set_bit(intr, pcpu_masks[cpu].pcpu_mask); |
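gic_handle_shared_int() accumulates pending/mask bits into unsigned long bitmaps. On a 64-bit kernel each bitmap word covers 64 interrupts, but when the CM only supports 32-bit register accesses (mips_cm_is64 is false) a single gic_read() fills only the lower half, so the added lines issue a second read for the upper half:

	if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
		continue;		/* 32-bit bitmap word, or one access already covered it */

	/* 64-bit kernel with 32-bit CM registers: fold in the upper half of the word */
	pending[i]  |= (u64)gic_read(pending_reg) << 32;
	intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
	pending_reg  += gic_reg_step;
	intrmask_reg += gic_reg_step;

The routing hunks in the same file wrap the destination CPU in mips_cm_vp_id() so the GIC map registers are programmed with the hardware virtual-processor id rather than the Linux CPU number.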
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 781ed6e71dbb..013fc9659a84 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c | |||
@@ -129,7 +129,7 @@ struct irq_chip icu_irq_chip = { | |||
129 | .irq_unmask = icu_unmask_irq, | 129 | .irq_unmask = icu_unmask_irq, |
130 | }; | 130 | }; |
131 | 131 | ||
132 | static void icu_mux_irq_demux(unsigned int __irq, struct irq_desc *desc) | 132 | static void icu_mux_irq_demux(struct irq_desc *desc) |
133 | { | 133 | { |
134 | unsigned int irq = irq_desc_get_irq(desc); | 134 | unsigned int irq = irq_desc_get_irq(desc); |
135 | struct irq_domain *domain; | 135 | struct irq_domain *domain; |
@@ -164,7 +164,6 @@ static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq, | |||
164 | irq_hw_number_t hw) | 164 | irq_hw_number_t hw) |
165 | { | 165 | { |
166 | irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); | 166 | irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); |
167 | set_irq_flags(irq, IRQF_VALID); | ||
168 | return 0; | 167 | return 0; |
169 | } | 168 | } |
170 | 169 | ||
@@ -234,7 +233,6 @@ void __init icu_init_irq(void) | |||
234 | for (irq = 0; irq < 64; irq++) { | 233 | for (irq = 0; irq < 64; irq++) { |
235 | icu_mask_irq(irq_get_irq_data(irq)); | 234 | icu_mask_irq(irq_get_irq_data(irq)); |
236 | irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); | 235 | irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); |
237 | set_irq_flags(irq, IRQF_VALID); | ||
238 | } | 236 | } |
239 | irq_set_default_host(icu_data[0].domain); | 237 | irq_set_default_host(icu_data[0].domain); |
240 | set_handle_irq(mmp_handle_irq); | 238 | set_handle_irq(mmp_handle_irq); |
@@ -337,7 +335,6 @@ void __init mmp2_init_icu(void) | |||
337 | irq_set_chip_and_handler(irq, &icu_irq_chip, | 335 | irq_set_chip_and_handler(irq, &icu_irq_chip, |
338 | handle_level_irq); | 336 | handle_level_irq); |
339 | } | 337 | } |
340 | set_irq_flags(irq, IRQF_VALID); | ||
341 | } | 338 | } |
342 | irq_set_default_host(icu_data[0].domain); | 339 | irq_set_default_host(icu_data[0].domain); |
343 | set_handle_irq(mmp2_handle_irq); | 340 | set_handle_irq(mmp2_handle_irq); |
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index 1faf812f3dc8..604df63e2edf 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c | |||
@@ -84,7 +84,6 @@ static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, | |||
84 | irq_hw_number_t hw) | 84 | irq_hw_number_t hw) |
85 | { | 85 | { |
86 | irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq); | 86 | irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq); |
87 | set_irq_flags(virq, IRQF_VALID); | ||
88 | 87 | ||
89 | return 0; | 88 | return 0; |
90 | } | 89 | } |
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c index 5ea999a724b5..be4c5a8c9659 100644 --- a/drivers/irqchip/irq-orion.c +++ b/drivers/irqchip/irq-orion.c | |||
@@ -106,7 +106,7 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init); | |||
106 | #define ORION_BRIDGE_IRQ_CAUSE 0x00 | 106 | #define ORION_BRIDGE_IRQ_CAUSE 0x00 |
107 | #define ORION_BRIDGE_IRQ_MASK 0x04 | 107 | #define ORION_BRIDGE_IRQ_MASK 0x04 |
108 | 108 | ||
109 | static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) | 109 | static void orion_bridge_irq_handler(struct irq_desc *desc) |
110 | { | 110 | { |
111 | struct irq_domain *d = irq_desc_get_handler_data(desc); | 111 | struct irq_domain *d = irq_desc_get_handler_data(desc); |
112 | 112 | ||
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 0670ab4e3897..9525335723f6 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c | |||
@@ -283,6 +283,9 @@ static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type) | |||
283 | static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on) | 283 | static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on) |
284 | { | 284 | { |
285 | struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); | 285 | struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); |
286 | int hw_irq = irqd_to_hwirq(d); | ||
287 | |||
288 | irq_set_irq_wake(p->irq[hw_irq].requested_irq, on); | ||
286 | 289 | ||
287 | if (!p->clk) | 290 | if (!p->clk) |
288 | return 0; | 291 | return 0; |
@@ -332,6 +335,12 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id) | |||
332 | return status; | 335 | return status; |
333 | } | 336 | } |
334 | 337 | ||
338 | /* | ||
339 | * This lock class tells lockdep that INTC External IRQ Pin irqs are in a | ||
340 | * different category than their parents, so it won't report false recursion. | ||
341 | */ | ||
342 | static struct lock_class_key intc_irqpin_irq_lock_class; | ||
343 | |||
335 | static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, | 344 | static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, |
336 | irq_hw_number_t hw) | 345 | irq_hw_number_t hw) |
337 | { | 346 | { |
@@ -342,8 +351,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, | |||
342 | 351 | ||
343 | intc_irqpin_dbg(&p->irq[hw], "map"); | 352 | intc_irqpin_dbg(&p->irq[hw], "map"); |
344 | irq_set_chip_data(virq, h->host_data); | 353 | irq_set_chip_data(virq, h->host_data); |
354 | irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class); | ||
345 | irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); | 355 | irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); |
346 | set_irq_flags(virq, IRQF_VALID); /* kill me now */ | ||
347 | return 0; | 356 | return 0; |
348 | } | 357 | } |
349 | 358 | ||
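The Renesas INTC-IRQPIN hunks (and the matching irq-renesas-irqc.c hunks below) add two independent fixes: .irq_set_wake now forwards the wake setting to the parent interrupt the driver actually requested, and each mapped irq gets its own lockdep class so the nested child/parent descriptor locks are not reported as recursive locking. A compact sketch combining both, with "my_" names standing in for the driver's own:

	static struct lock_class_key my_irq_lock_class;	/* separate lockdep category for child irqs */

	static int my_irq_set_wake(struct irq_data *d, unsigned int on)
	{
		struct my_priv *p = irq_data_get_irq_chip_data(d);

		/* propagate the wake request to the upstream (requested) interrupt */
		irq_set_irq_wake(p->irq[irqd_to_hwirq(d)].requested_irq, on);
		return 0;
	}

	static int my_irq_domain_map(struct irq_domain *h, unsigned int virq,
				     irq_hw_number_t hw)
	{
		struct my_priv *p = h->host_data;

		irq_set_chip_data(virq, p);
		irq_set_lockdep_class(virq, &my_irq_lock_class);
		irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
		return 0;
	}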
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index 2aa3add711a6..35bf97ba4a3d 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c | |||
@@ -121,6 +121,9 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type) | |||
121 | static int irqc_irq_set_wake(struct irq_data *d, unsigned int on) | 121 | static int irqc_irq_set_wake(struct irq_data *d, unsigned int on) |
122 | { | 122 | { |
123 | struct irqc_priv *p = irq_data_get_irq_chip_data(d); | 123 | struct irqc_priv *p = irq_data_get_irq_chip_data(d); |
124 | int hw_irq = irqd_to_hwirq(d); | ||
125 | |||
126 | irq_set_irq_wake(p->irq[hw_irq].requested_irq, on); | ||
124 | 127 | ||
125 | if (!p->clk) | 128 | if (!p->clk) |
126 | return 0; | 129 | return 0; |
@@ -150,6 +153,12 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id) | |||
150 | return IRQ_NONE; | 153 | return IRQ_NONE; |
151 | } | 154 | } |
152 | 155 | ||
156 | /* | ||
157 | * This lock class tells lockdep that IRQC irqs are in a different | ||
158 | * category than their parents, so it won't report false recursion. | ||
159 | */ | ||
160 | static struct lock_class_key irqc_irq_lock_class; | ||
161 | |||
153 | static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq, | 162 | static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq, |
154 | irq_hw_number_t hw) | 163 | irq_hw_number_t hw) |
155 | { | 164 | { |
@@ -157,6 +166,7 @@ static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq, | |||
157 | 166 | ||
158 | irqc_dbg(&p->irq[hw], "map"); | 167 | irqc_dbg(&p->irq[hw], "map"); |
159 | irq_set_chip_data(virq, h->host_data); | 168 | irq_set_chip_data(virq, h->host_data); |
169 | irq_set_lockdep_class(virq, &irqc_irq_lock_class); | ||
160 | irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); | 170 | irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); |
161 | return 0; | 171 | return 0; |
162 | } | 172 | } |
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c index 506d9f20ca51..7154b011ddd2 100644 --- a/drivers/irqchip/irq-s3c24xx.c +++ b/drivers/irqchip/irq-s3c24xx.c | |||
@@ -298,7 +298,7 @@ static struct irq_chip s3c_irq_eint0t4 = { | |||
298 | .irq_set_type = s3c_irqext0_type, | 298 | .irq_set_type = s3c_irqext0_type, |
299 | }; | 299 | }; |
300 | 300 | ||
301 | static void s3c_irq_demux(unsigned int __irq, struct irq_desc *desc) | 301 | static void s3c_irq_demux(struct irq_desc *desc) |
302 | { | 302 | { |
303 | struct irq_chip *chip = irq_desc_get_chip(desc); | 303 | struct irq_chip *chip = irq_desc_get_chip(desc); |
304 | struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc); | 304 | struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc); |
@@ -466,13 +466,11 @@ static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq, | |||
466 | 466 | ||
467 | irq_set_chip_data(virq, irq_data); | 467 | irq_set_chip_data(virq, irq_data); |
468 | 468 | ||
469 | set_irq_flags(virq, IRQF_VALID); | ||
470 | |||
471 | if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) { | 469 | if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) { |
472 | if (irq_data->parent_irq > 31) { | 470 | if (irq_data->parent_irq > 31) { |
473 | pr_err("irq-s3c24xx: parent irq %lu is out of range\n", | 471 | pr_err("irq-s3c24xx: parent irq %lu is out of range\n", |
474 | irq_data->parent_irq); | 472 | irq_data->parent_irq); |
475 | goto err; | 473 | return -EINVAL; |
476 | } | 474 | } |
477 | 475 | ||
478 | parent_irq_data = &parent_intc->irqs[irq_data->parent_irq]; | 476 | parent_irq_data = &parent_intc->irqs[irq_data->parent_irq]; |
@@ -485,18 +483,12 @@ static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq, | |||
485 | if (!irqno) { | 483 | if (!irqno) { |
486 | pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n", | 484 | pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n", |
487 | irq_data->parent_irq); | 485 | irq_data->parent_irq); |
488 | goto err; | 486 | return -EINVAL; |
489 | } | 487 | } |
490 | irq_set_chained_handler(irqno, s3c_irq_demux); | 488 | irq_set_chained_handler(irqno, s3c_irq_demux); |
491 | } | 489 | } |
492 | 490 | ||
493 | return 0; | 491 | return 0; |
494 | |||
495 | err: | ||
496 | set_irq_flags(virq, 0); | ||
497 | |||
498 | /* the only error can result from bad mapping data*/ | ||
499 | return -EINVAL; | ||
500 | } | 492 | } |
501 | 493 | ||
502 | static const struct irq_domain_ops s3c24xx_irq_ops = { | 494 | static const struct irq_domain_ops s3c24xx_irq_ops = { |
@@ -1174,8 +1166,6 @@ static int s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq, | |||
1174 | 1166 | ||
1175 | irq_set_chip_data(virq, irq_data); | 1167 | irq_set_chip_data(virq, irq_data); |
1176 | 1168 | ||
1177 | set_irq_flags(virq, IRQF_VALID); | ||
1178 | |||
1179 | return 0; | 1169 | return 0; |
1180 | } | 1170 | } |
1181 | 1171 | ||
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c index 4ad3e7c69aa7..0704362f4c82 100644 --- a/drivers/irqchip/irq-sun4i.c +++ b/drivers/irqchip/irq-sun4i.c | |||
@@ -83,7 +83,7 @@ static int sun4i_irq_map(struct irq_domain *d, unsigned int virq, | |||
83 | irq_hw_number_t hw) | 83 | irq_hw_number_t hw) |
84 | { | 84 | { |
85 | irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq); | 85 | irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq); |
86 | set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); | 86 | irq_set_probe(virq); |
87 | 87 | ||
88 | return 0; | 88 | return 0; |
89 | } | 89 | } |
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index 772a82cacbf7..c143dd58410c 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c | |||
@@ -58,7 +58,7 @@ static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off) | |||
58 | return irq_reg_readl(gc, off); | 58 | return irq_reg_readl(gc, off); |
59 | } | 59 | } |
60 | 60 | ||
61 | static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc) | 61 | static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc) |
62 | { | 62 | { |
63 | struct irq_domain *domain = irq_desc_get_handler_data(desc); | 63 | struct irq_domain *domain = irq_desc_get_handler_data(desc); |
64 | struct irq_chip *chip = irq_desc_get_chip(desc); | 64 | struct irq_chip *chip = irq_desc_get_chip(desc); |
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c index 331829661366..848d782a2a3b 100644 --- a/drivers/irqchip/irq-tb10x.c +++ b/drivers/irqchip/irq-tb10x.c | |||
@@ -97,7 +97,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) | |||
97 | return IRQ_SET_MASK_OK; | 97 | return IRQ_SET_MASK_OK; |
98 | } | 98 | } |
99 | 99 | ||
100 | static void tb10x_irq_cascade(unsigned int __irq, struct irq_desc *desc) | 100 | static void tb10x_irq_cascade(struct irq_desc *desc) |
101 | { | 101 | { |
102 | struct irq_domain *domain = irq_desc_get_handler_data(desc); | 102 | struct irq_domain *domain = irq_desc_get_handler_data(desc); |
103 | unsigned int irq = irq_desc_get_irq(desc); | 103 | unsigned int irq = irq_desc_get_irq(desc); |
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 16123f688768..598ab3f0e0ac 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c | |||
@@ -65,19 +65,19 @@ static void fpga_irq_unmask(struct irq_data *d) | |||
65 | writel(mask, f->base + IRQ_ENABLE_SET); | 65 | writel(mask, f->base + IRQ_ENABLE_SET); |
66 | } | 66 | } |
67 | 67 | ||
68 | static void fpga_irq_handle(unsigned int __irq, struct irq_desc *desc) | 68 | static void fpga_irq_handle(struct irq_desc *desc) |
69 | { | 69 | { |
70 | struct fpga_irq_data *f = irq_desc_get_handler_data(desc); | 70 | struct fpga_irq_data *f = irq_desc_get_handler_data(desc); |
71 | unsigned int irq = irq_desc_get_irq(desc); | ||
72 | u32 status = readl(f->base + IRQ_STATUS); | 71 | u32 status = readl(f->base + IRQ_STATUS); |
73 | 72 | ||
74 | if (status == 0) { | 73 | if (status == 0) { |
75 | do_bad_IRQ(irq, desc); | 74 | do_bad_IRQ(desc); |
76 | return; | 75 | return; |
77 | } | 76 | } |
78 | 77 | ||
79 | do { | 78 | do { |
80 | irq = ffs(status) - 1; | 79 | unsigned int irq = ffs(status) - 1; |
80 | |||
81 | status &= ~(1 << irq); | 81 | status &= ~(1 << irq); |
82 | generic_handle_irq(irq_find_mapping(f->domain, irq)); | 82 | generic_handle_irq(irq_find_mapping(f->domain, irq)); |
83 | } while (status); | 83 | } while (status); |
@@ -128,7 +128,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq, | |||
128 | irq_set_chip_data(irq, f); | 128 | irq_set_chip_data(irq, f); |
129 | irq_set_chip_and_handler(irq, &f->chip, | 129 | irq_set_chip_and_handler(irq, &f->chip, |
130 | handle_level_irq); | 130 | handle_level_irq); |
131 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 131 | irq_set_probe(irq); |
132 | return 0; | 132 | return 0; |
133 | } | 133 | } |
134 | 134 | ||
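The versatile-fpga handler keeps the same ffs()-driven demux but narrows the loop variable's scope, since the descriptor-only prototype no longer supplies a Linux irq number to reuse; a zero status is still treated as spurious via do_bad_IRQ(). The demux loop in its converted form:

	u32 status = readl(f->base + IRQ_STATUS);

	if (status == 0) {
		do_bad_IRQ(desc);			/* nothing pending: spurious */
		return;
	}

	do {
		unsigned int irq = ffs(status) - 1;	/* lowest pending source */

		status &= ~(1 << irq);
		generic_handle_irq(irq_find_mapping(f->domain, irq));
	} while (status);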
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 03846dff4212..b956dfffe78c 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c | |||
@@ -201,7 +201,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq, | |||
201 | return -EPERM; | 201 | return -EPERM; |
202 | irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq); | 202 | irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq); |
203 | irq_set_chip_data(irq, v->base); | 203 | irq_set_chip_data(irq, v->base); |
204 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 204 | irq_set_probe(irq); |
205 | return 0; | 205 | return 0; |
206 | } | 206 | } |
207 | 207 | ||
@@ -225,7 +225,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) | |||
225 | return handled; | 225 | return handled; |
226 | } | 226 | } |
227 | 227 | ||
228 | static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc) | 228 | static void vic_handle_irq_cascaded(struct irq_desc *desc) |
229 | { | 229 | { |
230 | u32 stat, hwirq; | 230 | u32 stat, hwirq; |
231 | struct irq_chip *host_chip = irq_desc_get_chip(desc); | 231 | struct irq_chip *host_chip = irq_desc_get_chip(desc); |
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c index 8371d9978d31..f9af0af21751 100644 --- a/drivers/irqchip/irq-vt8500.c +++ b/drivers/irqchip/irq-vt8500.c | |||
@@ -167,7 +167,6 @@ static int vt8500_irq_map(struct irq_domain *h, unsigned int virq, | |||
167 | irq_hw_number_t hw) | 167 | irq_hw_number_t hw) |
168 | { | 168 | { |
169 | irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq); | 169 | irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq); |
170 | set_irq_flags(virq, IRQF_VALID); | ||
171 | 170 | ||
172 | return 0; | 171 | return 0; |
173 | } | 172 | } |
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 4cbd9c5dc1e6..1ccd2abed65f 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c | |||
@@ -182,7 +182,7 @@ static struct spear_shirq *spear320_shirq_blocks[] = { | |||
182 | &spear320_shirq_intrcomm_ras, | 182 | &spear320_shirq_intrcomm_ras, |
183 | }; | 183 | }; |
184 | 184 | ||
185 | static void shirq_handler(unsigned __irq, struct irq_desc *desc) | 185 | static void shirq_handler(struct irq_desc *desc) |
186 | { | 186 | { |
187 | struct spear_shirq *shirq = irq_desc_get_handler_data(desc); | 187 | struct spear_shirq *shirq = irq_desc_get_handler_data(desc); |
188 | u32 pend; | 188 | u32 pend; |
@@ -211,7 +211,6 @@ static void __init spear_shirq_register(struct spear_shirq *shirq, | |||
211 | for (i = 0; i < shirq->nr_irqs; i++) { | 211 | for (i = 0; i < shirq->nr_irqs; i++) { |
212 | irq_set_chip_and_handler(shirq->virq_base + i, | 212 | irq_set_chip_and_handler(shirq->virq_base + i, |
213 | shirq->irq_chip, handle_simple_irq); | 213 | shirq->irq_chip, handle_simple_irq); |
214 | set_irq_flags(shirq->virq_base + i, IRQF_VALID); | ||
215 | irq_set_chip_data(shirq->virq_base + i, shirq); | 214 | irq_set_chip_data(shirq->virq_base + i, shirq); |
216 | } | 215 | } |
217 | } | 216 | } |
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 70f4255ff291..42990f2d0317 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
@@ -170,6 +170,7 @@ config LEDS_SUNFIRE | |||
170 | 170 | ||
171 | config LEDS_IPAQ_MICRO | 171 | config LEDS_IPAQ_MICRO |
172 | tristate "LED Support for the Compaq iPAQ h3xxx" | 172 | tristate "LED Support for the Compaq iPAQ h3xxx" |
173 | depends on LEDS_CLASS | ||
173 | depends on MFD_IPAQ_MICRO | 174 | depends on MFD_IPAQ_MICRO |
174 | help | 175 | help |
175 | Choose this option if you want to use the notification LED on | 176 | Choose this option if you want to use the notification LED on |
@@ -229,7 +230,7 @@ config LEDS_LP55XX_COMMON | |||
229 | tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501" | 230 | tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501" |
230 | depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501 | 231 | depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501 |
231 | select FW_LOADER | 232 | select FW_LOADER |
232 | select FW_LOADER_USER_HELPER_FALLBACK | 233 | select FW_LOADER_USER_HELPER |
233 | help | 234 | help |
234 | This option supports common operations for LP5521/5523/55231/5562/8501 | 235 | This option supports common operations for LP5521/5523/55231/5562/8501 |
235 | devices. | 236 | devices. |
diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c index fd7c25fd29c1..ac77d36b630c 100644 --- a/drivers/leds/leds-aat1290.c +++ b/drivers/leds/leds-aat1290.c | |||
@@ -331,7 +331,7 @@ static void aat1290_led_validate_mm_current(struct aat1290_led *led, | |||
331 | cfg->max_brightness = b + 1; | 331 | cfg->max_brightness = b + 1; |
332 | } | 332 | } |
333 | 333 | ||
334 | int init_mm_current_scale(struct aat1290_led *led, | 334 | static int init_mm_current_scale(struct aat1290_led *led, |
335 | struct aat1290_led_config_data *cfg) | 335 | struct aat1290_led_config_data *cfg) |
336 | { | 336 | { |
337 | int max_mm_current_percent[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, | 337 | int max_mm_current_percent[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, |
@@ -559,6 +559,7 @@ static const struct of_device_id aat1290_led_dt_match[] = { | |||
559 | { .compatible = "skyworks,aat1290" }, | 559 | { .compatible = "skyworks,aat1290" }, |
560 | {}, | 560 | {}, |
561 | }; | 561 | }; |
562 | MODULE_DEVICE_TABLE(of, aat1290_led_dt_match); | ||
562 | 563 | ||
563 | static struct platform_driver aat1290_led_driver = { | 564 | static struct platform_driver aat1290_led_driver = { |
564 | .probe = aat1290_led_probe, | 565 | .probe = aat1290_led_probe, |
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c index 986fe1e28f84..1793727bc9ae 100644 --- a/drivers/leds/leds-bcm6328.c +++ b/drivers/leds/leds-bcm6328.c | |||
@@ -395,6 +395,7 @@ static const struct of_device_id bcm6328_leds_of_match[] = { | |||
395 | { .compatible = "brcm,bcm6328-leds", }, | 395 | { .compatible = "brcm,bcm6328-leds", }, |
396 | { }, | 396 | { }, |
397 | }; | 397 | }; |
398 | MODULE_DEVICE_TABLE(of, bcm6328_leds_of_match); | ||
398 | 399 | ||
399 | static struct platform_driver bcm6328_leds_driver = { | 400 | static struct platform_driver bcm6328_leds_driver = { |
400 | .probe = bcm6328_leds_probe, | 401 | .probe = bcm6328_leds_probe, |
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c index 21f96930b3be..7ea3526702e0 100644 --- a/drivers/leds/leds-bcm6358.c +++ b/drivers/leds/leds-bcm6358.c | |||
@@ -226,6 +226,7 @@ static const struct of_device_id bcm6358_leds_of_match[] = { | |||
226 | { .compatible = "brcm,bcm6358-leds", }, | 226 | { .compatible = "brcm,bcm6358-leds", }, |
227 | { }, | 227 | { }, |
228 | }; | 228 | }; |
229 | MODULE_DEVICE_TABLE(of, bcm6358_leds_of_match); | ||
229 | 230 | ||
230 | static struct platform_driver bcm6358_leds_driver = { | 231 | static struct platform_driver bcm6358_leds_driver = { |
231 | .probe = bcm6358_leds_probe, | 232 | .probe = bcm6358_leds_probe, |
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c index 2ae8c4d17ff8..feca07be85f5 100644 --- a/drivers/leds/leds-ktd2692.c +++ b/drivers/leds/leds-ktd2692.c | |||
@@ -426,6 +426,7 @@ static const struct of_device_id ktd2692_match[] = { | |||
426 | { .compatible = "kinetic,ktd2692", }, | 426 | { .compatible = "kinetic,ktd2692", }, |
427 | { /* sentinel */ }, | 427 | { /* sentinel */ }, |
428 | }; | 428 | }; |
429 | MODULE_DEVICE_TABLE(of, ktd2692_match); | ||
429 | 430 | ||
430 | static struct platform_driver ktd2692_driver = { | 431 | static struct platform_driver ktd2692_driver = { |
431 | .driver = { | 432 | .driver = { |
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c index df348a06d8c7..afbb1409b2e2 100644 --- a/drivers/leds/leds-max77693.c +++ b/drivers/leds/leds-max77693.c | |||
@@ -1080,6 +1080,7 @@ static const struct of_device_id max77693_led_dt_match[] = { | |||
1080 | { .compatible = "maxim,max77693-led" }, | 1080 | { .compatible = "maxim,max77693-led" }, |
1081 | {}, | 1081 | {}, |
1082 | }; | 1082 | }; |
1083 | MODULE_DEVICE_TABLE(of, max77693_led_dt_match); | ||
1083 | 1084 | ||
1084 | static struct platform_driver max77693_led_driver = { | 1085 | static struct platform_driver max77693_led_driver = { |
1085 | .probe = max77693_led_probe, | 1086 | .probe = max77693_led_probe, |
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c index b33514d9f427..a95a61220169 100644 --- a/drivers/leds/leds-ns2.c +++ b/drivers/leds/leds-ns2.c | |||
@@ -337,6 +337,7 @@ static const struct of_device_id of_ns2_leds_match[] = { | |||
337 | { .compatible = "lacie,ns2-leds", }, | 337 | { .compatible = "lacie,ns2-leds", }, |
338 | {}, | 338 | {}, |
339 | }; | 339 | }; |
340 | MODULE_DEVICE_TABLE(of, of_ns2_leds_match); | ||
340 | #endif /* CONFIG_OF_GPIO */ | 341 | #endif /* CONFIG_OF_GPIO */ |
341 | 342 | ||
342 | struct ns2_led_priv { | 343 | struct ns2_led_priv { |
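Each of the LED driver hunks above adds the same one-liner: registering the OF match table with MODULE_DEVICE_TABLE(of, ...) so that, when the driver is built as a module, the module aliases are emitted and userspace can autoload it for a matching device-tree node. The general shape, with an illustrative compatible string:

	static const struct of_device_id foo_leds_of_match[] = {
		{ .compatible = "vendor,foo-leds", },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, foo_leds_of_match);	/* generates module aliases for autoloading */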
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c index de36237d7c6b..051645498b53 100644 --- a/drivers/mcb/mcb-pci.c +++ b/drivers/mcb/mcb-pci.c | |||
@@ -74,7 +74,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
74 | ret = -ENOTSUPP; | 74 | ret = -ENOTSUPP; |
75 | dev_err(&pdev->dev, | 75 | dev_err(&pdev->dev, |
76 | "IO mapped PCI devices are not supported\n"); | 76 | "IO mapped PCI devices are not supported\n"); |
77 | goto out_release; | 77 | goto out_iounmap; |
78 | } | 78 | } |
79 | 79 | ||
80 | pci_set_drvdata(pdev, priv); | 80 | pci_set_drvdata(pdev, priv); |
@@ -89,7 +89,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
89 | 89 | ||
90 | ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); | 90 | ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); |
91 | if (ret < 0) | 91 | if (ret < 0) |
92 | goto out_iounmap; | 92 | goto out_mcb_bus; |
93 | num_cells = ret; | 93 | num_cells = ret; |
94 | 94 | ||
95 | dev_dbg(&pdev->dev, "Found %d cells\n", num_cells); | 95 | dev_dbg(&pdev->dev, "Found %d cells\n", num_cells); |
@@ -98,6 +98,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
98 | 98 | ||
99 | return 0; | 99 | return 0; |
100 | 100 | ||
101 | out_mcb_bus: | ||
102 | mcb_release_bus(priv->bus); | ||
101 | out_iounmap: | 103 | out_iounmap: |
102 | iounmap(priv->base); | 104 | iounmap(priv->base); |
103 | out_release: | 105 | out_release: |
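The mcb-pci change is a goto-unwind ordering fix: once chameleon_parse_cells() can fail after the bus has been created, its error path must jump to a label that also releases the bus, and the earlier IO-mapped-BAR failure now unmaps the BAR it had already mapped. Schematically (the body of out_release is whatever the probe acquired first, which the hunk does not show):

	ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
	if (ret < 0)
		goto out_mcb_bus;		/* bus exists by now: release it as well */
	...
	return 0;

out_mcb_bus:
	mcb_release_bus(priv->bus);
out_iounmap:
	iounmap(priv->base);
out_release:
	/* release the resources acquired before the mapping (not shown in the hunk) */
	return ret;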
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e51de52eeb94..48b5890c28e3 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, | |||
1997 | if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) | 1997 | if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) |
1998 | ret = bitmap_storage_alloc(&store, chunks, | 1998 | ret = bitmap_storage_alloc(&store, chunks, |
1999 | !bitmap->mddev->bitmap_info.external, | 1999 | !bitmap->mddev->bitmap_info.external, |
2000 | bitmap->cluster_slot); | 2000 | mddev_is_clustered(bitmap->mddev) |
2001 | ? bitmap->cluster_slot : 0); | ||
2001 | if (ret) | 2002 | if (ret) |
2002 | goto err; | 2003 | goto err; |
2003 | 2004 | ||
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c index 240c9f0e85e7..8a096456579b 100644 --- a/drivers/md/dm-cache-policy-cleaner.c +++ b/drivers/md/dm-cache-policy-cleaner.c | |||
@@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size, | |||
436 | static struct dm_cache_policy_type wb_policy_type = { | 436 | static struct dm_cache_policy_type wb_policy_type = { |
437 | .name = "cleaner", | 437 | .name = "cleaner", |
438 | .version = {1, 0, 0}, | 438 | .version = {1, 0, 0}, |
439 | .hint_size = 0, | 439 | .hint_size = 4, |
440 | .owner = THIS_MODULE, | 440 | .owner = THIS_MODULE, |
441 | .create = wb_create | 441 | .create = wb_create |
442 | }; | 442 | }; |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index d60c88df5234..4b3b6f8aff0c 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); | |||
968 | 968 | ||
969 | /* | 969 | /* |
970 | * Generate a new unfragmented bio with the given size | 970 | * Generate a new unfragmented bio with the given size |
971 | * This should never violate the device limitations | 971 | * This should never violate the device limitations (but only because |
972 | * max_segment_size is being constrained to PAGE_SIZE). | ||
972 | * | 973 | * |
973 | * This function may be called concurrently. If we allocate from the mempool | 974 | * This function may be called concurrently. If we allocate from the mempool |
974 | * concurrently, there is a possibility of deadlock. For example, if we have | 975 | * concurrently, there is a possibility of deadlock. For example, if we have |
@@ -2045,9 +2046,20 @@ static int crypt_iterate_devices(struct dm_target *ti, | |||
2045 | return fn(ti, cc->dev, cc->start, ti->len, data); | 2046 | return fn(ti, cc->dev, cc->start, ti->len, data); |
2046 | } | 2047 | } |
2047 | 2048 | ||
2049 | static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) | ||
2050 | { | ||
2051 | /* | ||
2052 | * Unfortunate constraint that is required to avoid the potential | ||
2053 | * for exceeding underlying device's max_segments limits -- due to | ||
2054 | * crypt_alloc_buffer() possibly allocating pages for the encryption | ||
2055 | * bio that are not as physically contiguous as the original bio. | ||
2056 | */ | ||
2057 | limits->max_segment_size = PAGE_SIZE; | ||
2058 | } | ||
2059 | |||
2048 | static struct target_type crypt_target = { | 2060 | static struct target_type crypt_target = { |
2049 | .name = "crypt", | 2061 | .name = "crypt", |
2050 | .version = {1, 14, 0}, | 2062 | .version = {1, 14, 1}, |
2051 | .module = THIS_MODULE, | 2063 | .module = THIS_MODULE, |
2052 | .ctr = crypt_ctr, | 2064 | .ctr = crypt_ctr, |
2053 | .dtr = crypt_dtr, | 2065 | .dtr = crypt_dtr, |
@@ -2058,6 +2070,7 @@ static struct target_type crypt_target = { | |||
2058 | .resume = crypt_resume, | 2070 | .resume = crypt_resume, |
2059 | .message = crypt_message, | 2071 | .message = crypt_message, |
2060 | .iterate_devices = crypt_iterate_devices, | 2072 | .iterate_devices = crypt_iterate_devices, |
2073 | .io_hints = crypt_io_hints, | ||
2061 | }; | 2074 | }; |
2062 | 2075 | ||
2063 | static int __init dm_crypt_init(void) | 2076 | static int __init dm_crypt_init(void) |
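dm-crypt may allocate fresh pages for the encrypted clone that are less physically contiguous than the original bio, so without a limit the clone could exceed the underlying device's max_segments. The new .io_hints hook closes that hole by clamping each segment to a page, and the target version is bumped to record the behavioural change:

	static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
	{
		/* one page per segment keeps the re-built clone within max_segments */
		limits->max_segment_size = PAGE_SIZE;
	}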
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index ebaa4f803eec..192bb8beeb6b 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c | |||
@@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, | |||
203 | return -EINVAL; | 203 | return -EINVAL; |
204 | } | 204 | } |
205 | 205 | ||
206 | tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL); | 206 | tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL); |
207 | if (!tmp_store) { | 207 | if (!tmp_store) { |
208 | ti->error = "Exception store allocation failed"; | 208 | ti->error = "Exception store allocation failed"; |
209 | return -ENOMEM; | 209 | return -ENOMEM; |
@@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, | |||
215 | else if (persistent == 'N') | 215 | else if (persistent == 'N') |
216 | type = get_type("N"); | 216 | type = get_type("N"); |
217 | else { | 217 | else { |
218 | ti->error = "Persistent flag is not P or N"; | 218 | ti->error = "Exception store type is not P or N"; |
219 | r = -EINVAL; | 219 | r = -EINVAL; |
220 | goto bad_type; | 220 | goto bad_type; |
221 | } | 221 | } |
@@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, | |||
233 | if (r) | 233 | if (r) |
234 | goto bad; | 234 | goto bad; |
235 | 235 | ||
236 | r = type->ctr(tmp_store, 0, NULL); | 236 | r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL)); |
237 | if (r) { | 237 | if (r) { |
238 | ti->error = "Exception store type constructor failed"; | 238 | ti->error = "Exception store type constructor failed"; |
239 | goto bad; | 239 | goto bad; |
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index 0b2536247cf5..fae34e7a0b1e 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h | |||
@@ -42,8 +42,7 @@ struct dm_exception_store_type { | |||
42 | const char *name; | 42 | const char *name; |
43 | struct module *module; | 43 | struct module *module; |
44 | 44 | ||
45 | int (*ctr) (struct dm_exception_store *store, | 45 | int (*ctr) (struct dm_exception_store *store, char *options); |
46 | unsigned argc, char **argv); | ||
47 | 46 | ||
48 | /* | 47 | /* |
49 | * Destroys this object when you've finished with it. | 48 | * Destroys this object when you've finished with it. |
@@ -123,6 +122,8 @@ struct dm_exception_store { | |||
123 | unsigned chunk_shift; | 122 | unsigned chunk_shift; |
124 | 123 | ||
125 | void *context; | 124 | void *context; |
125 | |||
126 | bool userspace_supports_overflow; | ||
126 | }; | 127 | }; |
127 | 128 | ||
128 | /* | 129 | /* |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 97e165183e79..a0901214aef5 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) | |||
329 | */ | 329 | */ |
330 | if (min_region_size > (1 << 13)) { | 330 | if (min_region_size > (1 << 13)) { |
331 | /* If not a power of 2, make it the next power of 2 */ | 331 | /* If not a power of 2, make it the next power of 2 */ |
332 | if (min_region_size & (min_region_size - 1)) | 332 | region_size = roundup_pow_of_two(min_region_size); |
333 | region_size = 1 << fls(region_size); | ||
334 | DMINFO("Choosing default region size of %lu sectors", | 333 | DMINFO("Choosing default region size of %lu sectors", |
335 | region_size); | 334 | region_size); |
336 | } else { | 335 | } else { |
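The replaced dm-raid lines computed the default with fls() applied to region_size, which is still zero on this path, rather than to min_region_size; roundup_pow_of_two(min_region_size) states the intent directly (the kernel helper lives in <linux/log2.h>). A runnable userspace sketch of the same rounding:

#include <stdio.h>

/* Round v up to the next power of two, mirroring what
 * roundup_pow_of_two() returns for v >= 1. */
static unsigned long next_pow2(unsigned long v)
{
	unsigned long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	printf("%lu -> %lu\n", 8200UL, next_pow2(8200UL));	/* 16384 */
	printf("%lu -> %lu\n", 16384UL, next_pow2(16384UL));	/* already a power of two, unchanged */
	return 0;
}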
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index bf71583296f7..117a05e40090 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include "dm-exception-store.h" | 8 | #include "dm-exception-store.h" |
9 | 9 | ||
10 | #include <linux/ctype.h> | ||
10 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
11 | #include <linux/pagemap.h> | 12 | #include <linux/pagemap.h> |
12 | #include <linux/vmalloc.h> | 13 | #include <linux/vmalloc.h> |
@@ -843,10 +844,10 @@ static void persistent_drop_snapshot(struct dm_exception_store *store) | |||
843 | DMWARN("write header failed"); | 844 | DMWARN("write header failed"); |
844 | } | 845 | } |
845 | 846 | ||
846 | static int persistent_ctr(struct dm_exception_store *store, | 847 | static int persistent_ctr(struct dm_exception_store *store, char *options) |
847 | unsigned argc, char **argv) | ||
848 | { | 848 | { |
849 | struct pstore *ps; | 849 | struct pstore *ps; |
850 | int r; | ||
850 | 851 | ||
851 | /* allocate the pstore */ | 852 | /* allocate the pstore */ |
852 | ps = kzalloc(sizeof(*ps), GFP_KERNEL); | 853 | ps = kzalloc(sizeof(*ps), GFP_KERNEL); |
@@ -868,14 +869,32 @@ static int persistent_ctr(struct dm_exception_store *store, | |||
868 | 869 | ||
869 | ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0); | 870 | ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0); |
870 | if (!ps->metadata_wq) { | 871 | if (!ps->metadata_wq) { |
871 | kfree(ps); | ||
872 | DMERR("couldn't start header metadata update thread"); | 872 | DMERR("couldn't start header metadata update thread"); |
873 | return -ENOMEM; | 873 | r = -ENOMEM; |
874 | goto err_workqueue; | ||
875 | } | ||
876 | |||
877 | if (options) { | ||
878 | char overflow = toupper(options[0]); | ||
879 | if (overflow == 'O') | ||
880 | store->userspace_supports_overflow = true; | ||
881 | else { | ||
882 | DMERR("Unsupported persistent store option: %s", options); | ||
883 | r = -EINVAL; | ||
884 | goto err_options; | ||
885 | } | ||
874 | } | 886 | } |
875 | 887 | ||
876 | store->context = ps; | 888 | store->context = ps; |
877 | 889 | ||
878 | return 0; | 890 | return 0; |
891 | |||
892 | err_options: | ||
893 | destroy_workqueue(ps->metadata_wq); | ||
894 | err_workqueue: | ||
895 | kfree(ps); | ||
896 | |||
897 | return r; | ||
879 | } | 898 | } |
880 | 899 | ||
881 | static unsigned persistent_status(struct dm_exception_store *store, | 900 | static unsigned persistent_status(struct dm_exception_store *store, |
@@ -888,7 +907,8 @@ static unsigned persistent_status(struct dm_exception_store *store, | |||
888 | case STATUSTYPE_INFO: | 907 | case STATUSTYPE_INFO: |
889 | break; | 908 | break; |
890 | case STATUSTYPE_TABLE: | 909 | case STATUSTYPE_TABLE: |
891 | DMEMIT(" P %llu", (unsigned long long)store->chunk_size); | 910 | DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P", |
911 | (unsigned long long)store->chunk_size); | ||
892 | } | 912 | } |
893 | 913 | ||
894 | return sz; | 914 | return sz; |
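persistent_ctr now unwinds its failures through goto labels, so the workqueue and the pstore allocation are released in reverse order of acquisition, and it rejects any option string that is not "O". A generic sketch of that constructor shape; the example_store/example_priv types, the overflow_ok flag and the "example_wq" name are illustrative, not the dm-snap-persistent ones.

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>

static int example_ctr(struct example_store *store, char *options)
{
	struct example_priv *p;
	int r;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!p->wq) {
		r = -ENOMEM;
		goto err_workqueue;
	}

	if (options) {
		if (toupper(options[0]) == 'O')
			store->overflow_ok = true;	/* hypothetical flag */
		else {
			r = -EINVAL;
			goto err_options;	/* undo only what was already set up */
		}
	}

	store->context = p;
	return 0;

err_options:
	destroy_workqueue(p->wq);
err_workqueue:
	kfree(p);
	return r;
}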
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c index 1ce9a2586e41..9b7c8c8049d6 100644 --- a/drivers/md/dm-snap-transient.c +++ b/drivers/md/dm-snap-transient.c | |||
@@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store, | |||
70 | *metadata_sectors = 0; | 70 | *metadata_sectors = 0; |
71 | } | 71 | } |
72 | 72 | ||
73 | static int transient_ctr(struct dm_exception_store *store, | 73 | static int transient_ctr(struct dm_exception_store *store, char *options) |
74 | unsigned argc, char **argv) | ||
75 | { | 74 | { |
76 | struct transient_c *tc; | 75 | struct transient_c *tc; |
77 | 76 | ||
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c0bcd6516dfe..c06b74e91cd6 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -1098,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s) | |||
1098 | } | 1098 | } |
1099 | 1099 | ||
1100 | /* | 1100 | /* |
1101 | * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> | 1101 | * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size> |
1102 | */ | 1102 | */ |
1103 | static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | 1103 | static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
1104 | { | 1104 | { |
@@ -1302,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src, | |||
1302 | 1302 | ||
1303 | u.store_swap = snap_dest->store; | 1303 | u.store_swap = snap_dest->store; |
1304 | snap_dest->store = snap_src->store; | 1304 | snap_dest->store = snap_src->store; |
1305 | snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow; | ||
1305 | snap_src->store = u.store_swap; | 1306 | snap_src->store = u.store_swap; |
1306 | 1307 | ||
1307 | snap_dest->store->snap = snap_dest; | 1308 | snap_dest->store->snap = snap_dest; |
@@ -1739,8 +1740,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) | |||
1739 | 1740 | ||
1740 | pe = __find_pending_exception(s, pe, chunk); | 1741 | pe = __find_pending_exception(s, pe, chunk); |
1741 | if (!pe) { | 1742 | if (!pe) { |
1742 | s->snapshot_overflowed = 1; | 1743 | if (s->store->userspace_supports_overflow) { |
1743 | DMERR("Snapshot overflowed: Unable to allocate exception."); | 1744 | s->snapshot_overflowed = 1; |
1745 | DMERR("Snapshot overflowed: Unable to allocate exception."); | ||
1746 | } else | ||
1747 | __invalidate_snapshot(s, -ENOMEM); | ||
1744 | r = -EIO; | 1748 | r = -EIO; |
1745 | goto out_unlock; | 1749 | goto out_unlock; |
1746 | } | 1750 | } |
@@ -2365,7 +2369,7 @@ static struct target_type origin_target = { | |||
2365 | 2369 | ||
2366 | static struct target_type snapshot_target = { | 2370 | static struct target_type snapshot_target = { |
2367 | .name = "snapshot", | 2371 | .name = "snapshot", |
2368 | .version = {1, 14, 0}, | 2372 | .version = {1, 15, 0}, |
2369 | .module = THIS_MODULE, | 2373 | .module = THIS_MODULE, |
2370 | .ctr = snapshot_ctr, | 2374 | .ctr = snapshot_ctr, |
2371 | .dtr = snapshot_dtr, | 2375 | .dtr = snapshot_dtr, |
@@ -2379,7 +2383,7 @@ static struct target_type snapshot_target = { | |||
2379 | 2383 | ||
2380 | static struct target_type merge_target = { | 2384 | static struct target_type merge_target = { |
2381 | .name = dm_snapshot_merge_target_name, | 2385 | .name = dm_snapshot_merge_target_name, |
2382 | .version = {1, 3, 0}, | 2386 | .version = {1, 4, 0}, |
2383 | .module = THIS_MODULE, | 2387 | .module = THIS_MODULE, |
2384 | .ctr = snapshot_ctr, | 2388 | .ctr = snapshot_ctr, |
2385 | .dtr = snapshot_dtr, | 2389 | .dtr = snapshot_dtr, |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 6578b7bc1fbb..3897b90bd462 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -3201,7 +3201,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
3201 | metadata_low_callback, | 3201 | metadata_low_callback, |
3202 | pool); | 3202 | pool); |
3203 | if (r) | 3203 | if (r) |
3204 | goto out_free_pt; | 3204 | goto out_flags_changed; |
3205 | 3205 | ||
3206 | pt->callbacks.congested_fn = pool_is_congested; | 3206 | pt->callbacks.congested_fn = pool_is_congested; |
3207 | dm_table_add_target_callbacks(ti->table, &pt->callbacks); | 3207 | dm_table_add_target_callbacks(ti->table, &pt->callbacks); |
@@ -4249,6 +4249,10 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) | |||
4249 | { | 4249 | { |
4250 | struct thin_c *tc = ti->private; | 4250 | struct thin_c *tc = ti->private; |
4251 | struct pool *pool = tc->pool; | 4251 | struct pool *pool = tc->pool; |
4252 | struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md); | ||
4253 | |||
4254 | if (!pool_limits->discard_granularity) | ||
4255 | return; /* pool's discard support is disabled */ | ||
4252 | 4256 | ||
4253 | limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; | 4257 | limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; |
4254 | limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ | 4258 | limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ |
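thin_io_hints now consults the pool device's own queue limits first and returns early when the pool has discards disabled (a discard_granularity of 0), rather than unconditionally advertising discard limits on the thin device. A minimal sketch of that guard, assuming a target that mirrors an underlying mapped device's discard settings; example_thin and its pool_md member are assumed names.

static void example_thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct example_thin *tc = ti->private;
	struct queue_limits *backing = dm_get_queue_limits(tc->pool_md);

	if (!backing->discard_granularity)
		return;		/* discards are disabled below us */

	limits->discard_granularity = backing->discard_granularity;
	limits->max_discard_sectors = backing->max_discard_sectors;
}

For scale, the 2048 * 1024 * 16 in the hunk is a sector count: 33,554,432 sectors of 512 bytes each, i.e. the 16G noted in the comment.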
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6264781dc69a..1b5c6047e4f1 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1001,6 +1001,7 @@ static void end_clone_bio(struct bio *clone) | |||
1001 | struct dm_rq_target_io *tio = info->tio; | 1001 | struct dm_rq_target_io *tio = info->tio; |
1002 | struct bio *bio = info->orig; | 1002 | struct bio *bio = info->orig; |
1003 | unsigned int nr_bytes = info->orig->bi_iter.bi_size; | 1003 | unsigned int nr_bytes = info->orig->bi_iter.bi_size; |
1004 | int error = clone->bi_error; | ||
1004 | 1005 | ||
1005 | bio_put(clone); | 1006 | bio_put(clone); |
1006 | 1007 | ||
@@ -1011,13 +1012,13 @@ static void end_clone_bio(struct bio *clone) | |||
1011 | * the remainder. | 1012 | * the remainder. |
1012 | */ | 1013 | */ |
1013 | return; | 1014 | return; |
1014 | else if (bio->bi_error) { | 1015 | else if (error) { |
1015 | /* | 1016 | /* |
1016 | * Don't notice the error to the upper layer yet. | 1017 | * Don't notice the error to the upper layer yet. |
1017 | * The error handling decision is made by the target driver, | 1018 | * The error handling decision is made by the target driver, |
1018 | * when the request is completed. | 1019 | * when the request is completed. |
1019 | */ | 1020 | */ |
1020 | tio->error = bio->bi_error; | 1021 | tio->error = error; |
1021 | return; | 1022 | return; |
1022 | } | 1023 | } |
1023 | 1024 | ||
@@ -2837,8 +2838,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait) | |||
2837 | 2838 | ||
2838 | might_sleep(); | 2839 | might_sleep(); |
2839 | 2840 | ||
2840 | map = dm_get_live_table(md, &srcu_idx); | ||
2841 | |||
2842 | spin_lock(&_minor_lock); | 2841 | spin_lock(&_minor_lock); |
2843 | idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); | 2842 | idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); |
2844 | set_bit(DMF_FREEING, &md->flags); | 2843 | set_bit(DMF_FREEING, &md->flags); |
@@ -2852,14 +2851,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait) | |||
2852 | * do not race with internal suspend. | 2851 | * do not race with internal suspend. |
2853 | */ | 2852 | */ |
2854 | mutex_lock(&md->suspend_lock); | 2853 | mutex_lock(&md->suspend_lock); |
2854 | map = dm_get_live_table(md, &srcu_idx); | ||
2855 | if (!dm_suspended_md(md)) { | 2855 | if (!dm_suspended_md(md)) { |
2856 | dm_table_presuspend_targets(map); | 2856 | dm_table_presuspend_targets(map); |
2857 | dm_table_postsuspend_targets(map); | 2857 | dm_table_postsuspend_targets(map); |
2858 | } | 2858 | } |
2859 | mutex_unlock(&md->suspend_lock); | ||
2860 | |||
2861 | /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ | 2859 | /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ |
2862 | dm_put_live_table(md, srcu_idx); | 2860 | dm_put_live_table(md, srcu_idx); |
2861 | mutex_unlock(&md->suspend_lock); | ||
2863 | 2862 | ||
2864 | /* | 2863 | /* |
2865 | * Rare, but there may be I/O requests still going to complete, | 2864 | * Rare, but there may be I/O requests still going to complete, |
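The end_clone_bio change latches clone->bi_error into a local before bio_put() can free the clone, and records that saved value instead of reading bi_error off the original bio. Sketched generically, the rule is to copy out everything still needed from an object before dropping what may be its last reference; example_info and example_tio are hypothetical stand-ins for the request-based DM structures.

static void example_end_clone(struct bio *clone)
{
	struct example_info *info = clone->bi_private;	/* per-clone data, freed with the clone */
	struct example_tio *tio = info->tio;		/* lives outside the clone */
	int error = clone->bi_error;			/* latch before the put */

	bio_put(clone);					/* may free clone and info */

	if (error)
		tio->error = error;			/* only locals used from here on */
}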
diff --git a/drivers/md/md.c b/drivers/md/md.c index 4f5ecbe94ccb..c702de18207a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) | |||
5409 | * which will now never happen */ | 5409 | * which will now never happen */ |
5410 | wake_up_process(mddev->sync_thread->tsk); | 5410 | wake_up_process(mddev->sync_thread->tsk); |
5411 | 5411 | ||
5412 | if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) | ||
5413 | return -EBUSY; | ||
5412 | mddev_unlock(mddev); | 5414 | mddev_unlock(mddev); |
5413 | wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, | 5415 | wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, |
5414 | &mddev->recovery)); | 5416 | &mddev->recovery)); |
5417 | wait_event(mddev->sb_wait, | ||
5418 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); | ||
5415 | mddev_lock_nointr(mddev); | 5419 | mddev_lock_nointr(mddev); |
5416 | 5420 | ||
5417 | mutex_lock(&mddev->open_mutex); | 5421 | mutex_lock(&mddev->open_mutex); |
@@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev) | |||
8160 | md_reap_sync_thread(mddev); | 8164 | md_reap_sync_thread(mddev); |
8161 | clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); | 8165 | clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); |
8162 | clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 8166 | clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
8167 | clear_bit(MD_CHANGE_PENDING, &mddev->flags); | ||
8163 | goto unlock; | 8168 | goto unlock; |
8164 | } | 8169 | } |
8165 | 8170 | ||
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d222522c52e0..d132f06afdd1 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev) | |||
470 | return 0; | 470 | return 0; |
471 | 471 | ||
472 | out_free_conf: | 472 | out_free_conf: |
473 | if (conf->pool) | 473 | mempool_destroy(conf->pool); |
474 | mempool_destroy(conf->pool); | ||
475 | kfree(conf->multipaths); | 474 | kfree(conf->multipaths); |
476 | kfree(conf); | 475 | kfree(conf); |
477 | mddev->private = NULL; | 476 | mddev->private = NULL; |
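The multipath cleanup above (and the matching raid1, raid10 and raid5 hunks below) drops the explicit NULL checks because mempool_destroy(), kmem_cache_destroy() and kfree() all tolerate a NULL pointer, so error paths can call them unconditionally. A generic teardown sketch; example_conf and its members are assumed names.

static void example_free_conf(struct example_conf *conf)
{
	if (!conf)
		return;
	mempool_destroy(conf->pool);		/* no-op when the pool was never created */
	kmem_cache_destroy(conf->cache);	/* likewise NULL-safe */
	kfree(conf->items);			/* kfree(NULL) has always been a no-op */
	kfree(conf);
}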
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 63e619b2f44e..f8e5db0cb5aa 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev) | |||
376 | struct md_rdev *rdev; | 376 | struct md_rdev *rdev; |
377 | bool discard_supported = false; | 377 | bool discard_supported = false; |
378 | 378 | ||
379 | rdev_for_each(rdev, mddev) { | ||
380 | disk_stack_limits(mddev->gendisk, rdev->bdev, | ||
381 | rdev->data_offset << 9); | ||
382 | if (blk_queue_discard(bdev_get_queue(rdev->bdev))) | ||
383 | discard_supported = true; | ||
384 | } | ||
385 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); | 379 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); |
386 | blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); | 380 | blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); |
387 | blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); | 381 | blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); |
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev) | |||
390 | blk_queue_io_opt(mddev->queue, | 384 | blk_queue_io_opt(mddev->queue, |
391 | (mddev->chunk_sectors << 9) * mddev->raid_disks); | 385 | (mddev->chunk_sectors << 9) * mddev->raid_disks); |
392 | 386 | ||
387 | rdev_for_each(rdev, mddev) { | ||
388 | disk_stack_limits(mddev->gendisk, rdev->bdev, | ||
389 | rdev->data_offset << 9); | ||
390 | if (blk_queue_discard(bdev_get_queue(rdev->bdev))) | ||
391 | discard_supported = true; | ||
392 | } | ||
393 | if (!discard_supported) | 393 | if (!discard_supported) |
394 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | 394 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); |
395 | else | 395 | else |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4517f06c41ba..ddd8a5f572aa 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) | |||
881 | } | 881 | } |
882 | 882 | ||
883 | if (bio && bio_data_dir(bio) == WRITE) { | 883 | if (bio && bio_data_dir(bio) == WRITE) { |
884 | if (bio->bi_iter.bi_sector >= | 884 | if (bio->bi_iter.bi_sector >= conf->next_resync) { |
885 | conf->mddev->curr_resync_completed) { | ||
886 | if (conf->start_next_window == MaxSector) | 885 | if (conf->start_next_window == MaxSector) |
887 | conf->start_next_window = | 886 | conf->start_next_window = |
888 | conf->next_resync + | 887 | conf->next_resync + |
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf) | |||
1516 | conf->r1buf_pool = NULL; | 1515 | conf->r1buf_pool = NULL; |
1517 | 1516 | ||
1518 | spin_lock_irq(&conf->resync_lock); | 1517 | spin_lock_irq(&conf->resync_lock); |
1519 | conf->next_resync = 0; | 1518 | conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE; |
1520 | conf->start_next_window = MaxSector; | 1519 | conf->start_next_window = MaxSector; |
1521 | conf->current_window_requests += | 1520 | conf->current_window_requests += |
1522 | conf->next_window_requests; | 1521 | conf->next_window_requests; |
@@ -2383,8 +2382,8 @@ static void raid1d(struct md_thread *thread) | |||
2383 | } | 2382 | } |
2384 | spin_unlock_irqrestore(&conf->device_lock, flags); | 2383 | spin_unlock_irqrestore(&conf->device_lock, flags); |
2385 | while (!list_empty(&tmp)) { | 2384 | while (!list_empty(&tmp)) { |
2386 | r1_bio = list_first_entry(&conf->bio_end_io_list, | 2385 | r1_bio = list_first_entry(&tmp, struct r1bio, |
2387 | struct r1bio, retry_list); | 2386 | retry_list); |
2388 | list_del(&r1_bio->retry_list); | 2387 | list_del(&r1_bio->retry_list); |
2389 | raid_end_bio_io(r1_bio); | 2388 | raid_end_bio_io(r1_bio); |
2390 | } | 2389 | } |
@@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
2843 | 2842 | ||
2844 | abort: | 2843 | abort: |
2845 | if (conf) { | 2844 | if (conf) { |
2846 | if (conf->r1bio_pool) | 2845 | mempool_destroy(conf->r1bio_pool); |
2847 | mempool_destroy(conf->r1bio_pool); | ||
2848 | kfree(conf->mirrors); | 2846 | kfree(conf->mirrors); |
2849 | safe_put_page(conf->tmppage); | 2847 | safe_put_page(conf->tmppage); |
2850 | kfree(conf->poolinfo); | 2848 | kfree(conf->poolinfo); |
@@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv) | |||
2946 | { | 2944 | { |
2947 | struct r1conf *conf = priv; | 2945 | struct r1conf *conf = priv; |
2948 | 2946 | ||
2949 | if (conf->r1bio_pool) | 2947 | mempool_destroy(conf->r1bio_pool); |
2950 | mempool_destroy(conf->r1bio_pool); | ||
2951 | kfree(conf->mirrors); | 2948 | kfree(conf->mirrors); |
2952 | safe_put_page(conf->tmppage); | 2949 | safe_put_page(conf->tmppage); |
2953 | kfree(conf->poolinfo); | 2950 | kfree(conf->poolinfo); |
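In raid1d the pending end_io entries are spliced onto a local tmp list under the device lock and drained afterwards; the old loop kept fetching list_first_entry() from conf->bio_end_io_list, which is empty after the splice, instead of from tmp. The splice-then-drain shape, as a fragment using the same variables as raid1d:

LIST_HEAD(tmp);

spin_lock_irqsave(&conf->device_lock, flags);
list_splice_init(&conf->bio_end_io_list, &tmp);	/* move everything while locked */
spin_unlock_irqrestore(&conf->device_lock, flags);

while (!list_empty(&tmp)) {
	r1_bio = list_first_entry(&tmp, struct r1bio, retry_list);
	list_del(&r1_bio->retry_list);
	raid_end_bio_io(r1_bio);		/* complete without holding the lock */
}

The raid10d hunk below applies the same correction to its r10bio drain loop.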
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0fc33eb88855..9f69dc526f8c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -2688,8 +2688,8 @@ static void raid10d(struct md_thread *thread) | |||
2688 | } | 2688 | } |
2689 | spin_unlock_irqrestore(&conf->device_lock, flags); | 2689 | spin_unlock_irqrestore(&conf->device_lock, flags); |
2690 | while (!list_empty(&tmp)) { | 2690 | while (!list_empty(&tmp)) { |
2691 | r10_bio = list_first_entry(&conf->bio_end_io_list, | 2691 | r10_bio = list_first_entry(&tmp, struct r10bio, |
2692 | struct r10bio, retry_list); | 2692 | retry_list); |
2693 | list_del(&r10_bio->retry_list); | 2693 | list_del(&r10_bio->retry_list); |
2694 | raid_end_bio_io(r10_bio); | 2694 | raid_end_bio_io(r10_bio); |
2695 | } | 2695 | } |
@@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) | |||
3486 | printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", | 3486 | printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", |
3487 | mdname(mddev)); | 3487 | mdname(mddev)); |
3488 | if (conf) { | 3488 | if (conf) { |
3489 | if (conf->r10bio_pool) | 3489 | mempool_destroy(conf->r10bio_pool); |
3490 | mempool_destroy(conf->r10bio_pool); | ||
3491 | kfree(conf->mirrors); | 3490 | kfree(conf->mirrors); |
3492 | safe_put_page(conf->tmppage); | 3491 | safe_put_page(conf->tmppage); |
3493 | kfree(conf); | 3492 | kfree(conf); |
@@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev) | |||
3682 | 3681 | ||
3683 | out_free_conf: | 3682 | out_free_conf: |
3684 | md_unregister_thread(&mddev->thread); | 3683 | md_unregister_thread(&mddev->thread); |
3685 | if (conf->r10bio_pool) | 3684 | mempool_destroy(conf->r10bio_pool); |
3686 | mempool_destroy(conf->r10bio_pool); | ||
3687 | safe_put_page(conf->tmppage); | 3685 | safe_put_page(conf->tmppage); |
3688 | kfree(conf->mirrors); | 3686 | kfree(conf->mirrors); |
3689 | kfree(conf); | 3687 | kfree(conf); |
@@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv) | |||
3696 | { | 3694 | { |
3697 | struct r10conf *conf = priv; | 3695 | struct r10conf *conf = priv; |
3698 | 3696 | ||
3699 | if (conf->r10bio_pool) | 3697 | mempool_destroy(conf->r10bio_pool); |
3700 | mempool_destroy(conf->r10bio_pool); | ||
3701 | safe_put_page(conf->tmppage); | 3698 | safe_put_page(conf->tmppage); |
3702 | kfree(conf->mirrors); | 3699 | kfree(conf->mirrors); |
3703 | kfree(conf->mirrors_old); | 3700 | kfree(conf->mirrors_old); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 15ef2c641b2b..49bb8d3ff9be 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf) | |||
2271 | drop_one_stripe(conf)) | 2271 | drop_one_stripe(conf)) |
2272 | ; | 2272 | ; |
2273 | 2273 | ||
2274 | if (conf->slab_cache) | 2274 | kmem_cache_destroy(conf->slab_cache); |
2275 | kmem_cache_destroy(conf->slab_cache); | ||
2276 | conf->slab_cache = NULL; | 2275 | conf->slab_cache = NULL; |
2277 | } | 2276 | } |
2278 | 2277 | ||
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
3150 | spin_unlock_irq(&sh->stripe_lock); | 3149 | spin_unlock_irq(&sh->stripe_lock); |
3151 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | 3150 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
3152 | wake_up(&conf->wait_for_overlap); | 3151 | wake_up(&conf->wait_for_overlap); |
3152 | if (bi) | ||
3153 | s->to_read--; | ||
3153 | while (bi && bi->bi_iter.bi_sector < | 3154 | while (bi && bi->bi_iter.bi_sector < |
3154 | sh->dev[i].sector + STRIPE_SECTORS) { | 3155 | sh->dev[i].sector + STRIPE_SECTORS) { |
3155 | struct bio *nextbi = | 3156 | struct bio *nextbi = |
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
3169 | */ | 3170 | */ |
3170 | clear_bit(R5_LOCKED, &sh->dev[i].flags); | 3171 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
3171 | } | 3172 | } |
3173 | s->to_write = 0; | ||
3174 | s->written = 0; | ||
3172 | 3175 | ||
3173 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) | 3176 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
3174 | if (atomic_dec_and_test(&conf->pending_full_writes)) | 3177 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, | |||
3300 | */ | 3303 | */ |
3301 | return 0; | 3304 | return 0; |
3302 | 3305 | ||
3303 | for (i = 0; i < s->failed; i++) { | 3306 | for (i = 0; i < s->failed && i < 2; i++) { |
3304 | if (fdev[i]->towrite && | 3307 | if (fdev[i]->towrite && |
3305 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && | 3308 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && |
3306 | !test_bit(R5_OVERWRITE, &fdev[i]->flags)) | 3309 | !test_bit(R5_OVERWRITE, &fdev[i]->flags)) |
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, | |||
3324 | sh->sector < sh->raid_conf->mddev->recovery_cp) | 3327 | sh->sector < sh->raid_conf->mddev->recovery_cp) |
3325 | /* reconstruct-write isn't being forced */ | 3328 | /* reconstruct-write isn't being forced */ |
3326 | return 0; | 3329 | return 0; |
3327 | for (i = 0; i < s->failed; i++) { | 3330 | for (i = 0; i < s->failed && i < 2; i++) { |
3328 | if (s->failed_num[i] != sh->pd_idx && | 3331 | if (s->failed_num[i] != sh->pd_idx && |
3329 | s->failed_num[i] != sh->qd_idx && | 3332 | s->failed_num[i] != sh->qd_idx && |
3330 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && | 3333 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && |
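need_this_block indexes s->failed_num[], which has only two slots, while s->failed can momentarily read as more than two during stripe analysis; bounding the loop at two keeps the indexing in range. A simplified fragment of the clamped loop (the real per-device checks are longer):

for (i = 0; i < s->failed && i < 2; i++) {
	struct r5dev *fdev = &sh->dev[s->failed_num[i]];

	if (!test_bit(R5_UPTODATE, &fdev->flags))
		return 1;	/* this block is needed to recover the failed device */
}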
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 4b54128bc78e..a726f01e3b02 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
@@ -138,7 +138,7 @@ static void asic3_irq_flip_edge(struct asic3 *asic, | |||
138 | spin_unlock_irqrestore(&asic->lock, flags); | 138 | spin_unlock_irqrestore(&asic->lock, flags); |
139 | } | 139 | } |
140 | 140 | ||
141 | static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | 141 | static void asic3_irq_demux(struct irq_desc *desc) |
142 | { | 142 | { |
143 | struct asic3 *asic = irq_desc_get_handler_data(desc); | 143 | struct asic3 *asic = irq_desc_get_handler_data(desc); |
144 | struct irq_data *data = irq_desc_get_irq_data(desc); | 144 | struct irq_data *data = irq_desc_get_irq_data(desc); |
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index a76eb6ef47a0..b279205659a4 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c | |||
@@ -205,7 +205,7 @@ static void pcap_isr_work(struct work_struct *work) | |||
205 | } while (gpio_get_value(pdata->gpio)); | 205 | } while (gpio_get_value(pdata->gpio)); |
206 | } | 206 | } |
207 | 207 | ||
208 | static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) | 208 | static void pcap_irq_handler(struct irq_desc *desc) |
209 | { | 209 | { |
210 | struct pcap_chip *pcap = irq_desc_get_handler_data(desc); | 210 | struct pcap_chip *pcap = irq_desc_get_handler_data(desc); |
211 | 211 | ||
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c index 9131cdcdc64a..6ccaf90d98fd 100644 --- a/drivers/mfd/htc-egpio.c +++ b/drivers/mfd/htc-egpio.c | |||
@@ -98,7 +98,7 @@ static struct irq_chip egpio_muxed_chip = { | |||
98 | .irq_unmask = egpio_unmask, | 98 | .irq_unmask = egpio_unmask, |
99 | }; | 99 | }; |
100 | 100 | ||
101 | static void egpio_handler(unsigned int irq, struct irq_desc *desc) | 101 | static void egpio_handler(struct irq_desc *desc) |
102 | { | 102 | { |
103 | struct egpio_info *ei = irq_desc_get_handler_data(desc); | 103 | struct egpio_info *ei = irq_desc_get_handler_data(desc); |
104 | int irqpin; | 104 | int irqpin; |
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h index f28cb28a62f8..2c7f8d7c0595 100644 --- a/drivers/mfd/intel-lpss.h +++ b/drivers/mfd/intel-lpss.h | |||
@@ -42,6 +42,8 @@ int intel_lpss_resume(struct device *dev); | |||
42 | .thaw = intel_lpss_resume, \ | 42 | .thaw = intel_lpss_resume, \ |
43 | .poweroff = intel_lpss_suspend, \ | 43 | .poweroff = intel_lpss_suspend, \ |
44 | .restore = intel_lpss_resume, | 44 | .restore = intel_lpss_resume, |
45 | #else | ||
46 | #define INTEL_LPSS_SLEEP_PM_OPS | ||
45 | #endif | 47 | #endif |
46 | 48 | ||
47 | #define INTEL_LPSS_RUNTIME_PM_OPS \ | 49 | #define INTEL_LPSS_RUNTIME_PM_OPS \ |
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 5bb49f08955d..798e44306382 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c | |||
@@ -65,7 +65,7 @@ struct jz4740_adc { | |||
65 | spinlock_t lock; | 65 | spinlock_t lock; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) | 68 | static void jz4740_adc_irq_demux(struct irq_desc *desc) |
69 | { | 69 | { |
70 | struct irq_chip_generic *gc = irq_desc_get_handler_data(desc); | 70 | struct irq_chip_generic *gc = irq_desc_get_handler_data(desc); |
71 | uint8_t status; | 71 | uint8_t status; |
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c index c52162ea3d0a..586098f1b233 100644 --- a/drivers/mfd/max77843.c +++ b/drivers/mfd/max77843.c | |||
@@ -80,7 +80,7 @@ static int max77843_chg_init(struct max77693_dev *max77843) | |||
80 | if (!max77843->i2c_chg) { | 80 | if (!max77843->i2c_chg) { |
81 | dev_err(&max77843->i2c->dev, | 81 | dev_err(&max77843->i2c->dev, |
82 | "Cannot allocate I2C device for Charger\n"); | 82 | "Cannot allocate I2C device for Charger\n"); |
83 | return PTR_ERR(max77843->i2c_chg); | 83 | return -ENODEV; |
84 | } | 84 | } |
85 | i2c_set_clientdata(max77843->i2c_chg, max77843); | 85 | i2c_set_clientdata(max77843->i2c_chg, max77843); |
86 | 86 | ||
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c index 59502d02cd15..1b7ec0870c2a 100644 --- a/drivers/mfd/pm8921-core.c +++ b/drivers/mfd/pm8921-core.c | |||
@@ -156,7 +156,7 @@ static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master) | |||
156 | return ret; | 156 | return ret; |
157 | } | 157 | } |
158 | 158 | ||
159 | static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc) | 159 | static void pm8xxx_irq_handler(struct irq_desc *desc) |
160 | { | 160 | { |
161 | struct pm_irq_chip *chip = irq_desc_get_handler_data(desc); | 161 | struct pm_irq_chip *chip = irq_desc_get_handler_data(desc); |
162 | struct irq_chip *irq_chip = irq_desc_get_chip(desc); | 162 | struct irq_chip *irq_chip = irq_desc_get_chip(desc); |
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index 16fc1adc4fa3..94bd89cb1f06 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c | |||
@@ -185,7 +185,7 @@ static struct mfd_cell t7l66xb_cells[] = { | |||
185 | /*--------------------------------------------------------------------------*/ | 185 | /*--------------------------------------------------------------------------*/ |
186 | 186 | ||
187 | /* Handle the T7L66XB interrupt mux */ | 187 | /* Handle the T7L66XB interrupt mux */ |
188 | static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) | 188 | static void t7l66xb_irq(struct irq_desc *desc) |
189 | { | 189 | { |
190 | struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc); | 190 | struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc); |
191 | unsigned int isr; | 191 | unsigned int isr; |
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c index 775b9aca871a..8c84a513016b 100644 --- a/drivers/mfd/tc6393xb.c +++ b/drivers/mfd/tc6393xb.c | |||
@@ -522,8 +522,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base) | |||
522 | 522 | ||
523 | /*--------------------------------------------------------------------------*/ | 523 | /*--------------------------------------------------------------------------*/ |
524 | 524 | ||
525 | static void | 525 | static void tc6393xb_irq(struct irq_desc *desc) |
526 | tc6393xb_irq(unsigned int irq, struct irq_desc *desc) | ||
527 | { | 526 | { |
528 | struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc); | 527 | struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc); |
529 | unsigned int isr; | 528 | unsigned int isr; |
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c index 9a2302129711..f691d7ecad52 100644 --- a/drivers/mfd/ucb1x00-core.c +++ b/drivers/mfd/ucb1x00-core.c | |||
@@ -282,7 +282,7 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb) | |||
282 | * SIBCLK to talk to the chip. We leave the clock running until | 282 | * SIBCLK to talk to the chip. We leave the clock running until |
283 | * we have finished processing all interrupts from the chip. | 283 | * we have finished processing all interrupts from the chip. |
284 | */ | 284 | */ |
285 | static void ucb1x00_irq(unsigned int __irq, struct irq_desc *desc) | 285 | static void ucb1x00_irq(struct irq_desc *desc) |
286 | { | 286 | { |
287 | struct ucb1x00 *ucb = irq_desc_get_handler_data(desc); | 287 | struct ucb1x00 *ucb = irq_desc_get_handler_data(desc); |
288 | unsigned int isr, i; | 288 | unsigned int isr, i; |
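Every mfd chained-IRQ demux handler above is converted for the irq flow-handler prototype change: the handler now receives only the struct irq_desc, and code that still wants the Linux irq number can fetch it with irq_desc_get_irq(desc). A sketch of the new shape for a generic demux handler; example_chip, its read_status accessor and the child_irq_base/nr_children fields are assumptions.

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/bitops.h>

static void example_irq_demux(struct irq_desc *desc)
{
	struct example_chip *chip = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	int bit;

	chained_irq_enter(irqchip, desc);
	status = chip->read_status(chip);		/* hypothetical hardware read */
	for_each_set_bit(bit, &status, chip->nr_children)
		generic_handle_irq(chip->child_irq_base + bit);
	chained_irq_exit(irqchip, desc);
}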
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile index 6f484dfe78f9..6982f603fadc 100644 --- a/drivers/misc/cxl/Makefile +++ b/drivers/misc/cxl/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | ccflags-y := -Werror | 1 | ccflags-y := -Werror -Wno-unused-const-variable |
2 | 2 | ||
3 | cxl-y += main.o file.o irq.o fault.o native.o | 3 | cxl-y += main.o file.o irq.o fault.o native.o |
4 | cxl-y += context.o sysfs.o debugfs.o pci.o trace.o | 4 | cxl-y += context.o sysfs.o debugfs.o pci.o trace.o |
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 8af12c884b04..103baf0e0c5b 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c | |||
@@ -105,6 +105,7 @@ EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs); | |||
105 | 105 | ||
106 | void cxl_free_afu_irqs(struct cxl_context *ctx) | 106 | void cxl_free_afu_irqs(struct cxl_context *ctx) |
107 | { | 107 | { |
108 | afu_irq_name_free(ctx); | ||
108 | cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); | 109 | cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); |
109 | } | 110 | } |
110 | EXPORT_SYMBOL_GPL(cxl_free_afu_irqs); | 111 | EXPORT_SYMBOL_GPL(cxl_free_afu_irqs); |
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index e762f85ee233..2faa1270d085 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c | |||
@@ -275,6 +275,9 @@ static void reclaim_ctx(struct rcu_head *rcu) | |||
275 | if (ctx->kernelapi) | 275 | if (ctx->kernelapi) |
276 | kfree(ctx->mapping); | 276 | kfree(ctx->mapping); |
277 | 277 | ||
278 | if (ctx->irq_bitmap) | ||
279 | kfree(ctx->irq_bitmap); | ||
280 | |||
278 | kfree(ctx); | 281 | kfree(ctx); |
279 | } | 282 | } |
280 | 283 | ||
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 1c30ef77073d..0cfb9c129f27 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h | |||
@@ -677,6 +677,7 @@ int cxl_register_serr_irq(struct cxl_afu *afu); | |||
677 | void cxl_release_serr_irq(struct cxl_afu *afu); | 677 | void cxl_release_serr_irq(struct cxl_afu *afu); |
678 | int afu_register_irqs(struct cxl_context *ctx, u32 count); | 678 | int afu_register_irqs(struct cxl_context *ctx, u32 count); |
679 | void afu_release_irqs(struct cxl_context *ctx, void *cookie); | 679 | void afu_release_irqs(struct cxl_context *ctx, void *cookie); |
680 | void afu_irq_name_free(struct cxl_context *ctx); | ||
680 | irqreturn_t cxl_slice_irq_err(int irq, void *data); | 681 | irqreturn_t cxl_slice_irq_err(int irq, void *data); |
681 | 682 | ||
682 | int cxl_debugfs_init(void); | 683 | int cxl_debugfs_init(void); |
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index a30bf285b5bd..7ccd2998be92 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c | |||
@@ -120,9 +120,16 @@ int afu_release(struct inode *inode, struct file *file) | |||
120 | __func__, ctx->pe); | 120 | __func__, ctx->pe); |
121 | cxl_context_detach(ctx); | 121 | cxl_context_detach(ctx); |
122 | 122 | ||
123 | mutex_lock(&ctx->mapping_lock); | 123 | |
124 | ctx->mapping = NULL; | 124 | /* |
125 | mutex_unlock(&ctx->mapping_lock); | 125 | * Delete the context's mapping pointer, unless it's created by the |
126 | * kernel API, in which case leave it so it can be freed by reclaim_ctx() | ||
127 | */ | ||
128 | if (!ctx->kernelapi) { | ||
129 | mutex_lock(&ctx->mapping_lock); | ||
130 | ctx->mapping = NULL; | ||
131 | mutex_unlock(&ctx->mapping_lock); | ||
132 | } | ||
126 | 133 | ||
127 | put_device(&ctx->afu->dev); | 134 | put_device(&ctx->afu->dev); |
128 | 135 | ||
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index 583b42afeda2..09a406058c46 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c | |||
@@ -414,7 +414,7 @@ void cxl_release_psl_irq(struct cxl_afu *afu) | |||
414 | kfree(afu->psl_irq_name); | 414 | kfree(afu->psl_irq_name); |
415 | } | 415 | } |
416 | 416 | ||
417 | static void afu_irq_name_free(struct cxl_context *ctx) | 417 | void afu_irq_name_free(struct cxl_context *ctx) |
418 | { | 418 | { |
419 | struct cxl_irq_name *irq_name, *tmp; | 419 | struct cxl_irq_name *irq_name, *tmp; |
420 | 420 | ||
@@ -524,7 +524,5 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie) | |||
524 | afu_irq_name_free(ctx); | 524 | afu_irq_name_free(ctx); |
525 | cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); | 525 | cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); |
526 | 526 | ||
527 | kfree(ctx->irq_bitmap); | ||
528 | ctx->irq_bitmap = NULL; | ||
529 | ctx->irq_count = 0; | 527 | ctx->irq_count = 0; |
530 | } | 528 | } |
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index b37f2e8004f5..d2e75c88f4d2 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c | |||
@@ -457,6 +457,7 @@ static int activate_afu_directed(struct cxl_afu *afu) | |||
457 | 457 | ||
458 | dev_info(&afu->dev, "Activating AFU directed mode\n"); | 458 | dev_info(&afu->dev, "Activating AFU directed mode\n"); |
459 | 459 | ||
460 | afu->num_procs = afu->max_procs_virtualised; | ||
460 | if (afu->spa == NULL) { | 461 | if (afu->spa == NULL) { |
461 | if (cxl_alloc_spa(afu)) | 462 | if (cxl_alloc_spa(afu)) |
462 | return -ENOMEM; | 463 | return -ENOMEM; |
@@ -468,7 +469,6 @@ static int activate_afu_directed(struct cxl_afu *afu) | |||
468 | cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); | 469 | cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); |
469 | 470 | ||
470 | afu->current_mode = CXL_MODE_DIRECTED; | 471 | afu->current_mode = CXL_MODE_DIRECTED; |
471 | afu->num_procs = afu->max_procs_virtualised; | ||
472 | 472 | ||
473 | if ((rc = cxl_chardev_m_afu_add(afu))) | 473 | if ((rc = cxl_chardev_m_afu_add(afu))) |
474 | return rc; | 474 | return rc; |
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 02c85160bfe9..85761d7eb333 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c | |||
@@ -1035,6 +1035,32 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev) | |||
1035 | return 0; | 1035 | return 0; |
1036 | } | 1036 | } |
1037 | 1037 | ||
1038 | /* | ||
1039 | * Workaround a PCIe Host Bridge defect on some cards, that can cause | ||
1040 | * malformed Transaction Layer Packet (TLP) errors to be erroneously | ||
1041 | * reported. Mask this error in the Uncorrectable Error Mask Register. | ||
1042 | * | ||
1043 | * The upper nibble of the PSL revision is used to distinguish between | ||
1044 | * different cards. The affected ones have it set to 0. | ||
1045 | */ | ||
1046 | static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev) | ||
1047 | { | ||
1048 | int aer; | ||
1049 | u32 data; | ||
1050 | |||
1051 | if (adapter->psl_rev & 0xf000) | ||
1052 | return; | ||
1053 | if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))) | ||
1054 | return; | ||
1055 | pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data); | ||
1056 | if (data & PCI_ERR_UNC_MALF_TLP) | ||
1057 | if (data & PCI_ERR_UNC_INTN) | ||
1058 | return; | ||
1059 | data |= PCI_ERR_UNC_MALF_TLP; | ||
1060 | data |= PCI_ERR_UNC_INTN; | ||
1061 | pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data); | ||
1062 | } | ||
1063 | |||
1038 | static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev) | 1064 | static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev) |
1039 | { | 1065 | { |
1040 | if (adapter->vsec_status & CXL_STATUS_SECOND_PORT) | 1066 | if (adapter->vsec_status & CXL_STATUS_SECOND_PORT) |
@@ -1134,6 +1160,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) | |||
1134 | if ((rc = cxl_vsec_looks_ok(adapter, dev))) | 1160 | if ((rc = cxl_vsec_looks_ok(adapter, dev))) |
1135 | return rc; | 1161 | return rc; |
1136 | 1162 | ||
1163 | cxl_fixup_malformed_tlp(adapter, dev); | ||
1164 | |||
1137 | if ((rc = setup_cxl_bars(dev))) | 1165 | if ((rc = setup_cxl_bars(dev))) |
1138 | return rc; | 1166 | return rc; |
1139 | 1167 | ||
@@ -1249,8 +1277,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1249 | int slice; | 1277 | int slice; |
1250 | int rc; | 1278 | int rc; |
1251 | 1279 | ||
1252 | pci_dev_get(dev); | ||
1253 | |||
1254 | if (cxl_verbose) | 1280 | if (cxl_verbose) |
1255 | dump_cxl_config_space(dev); | 1281 | dump_cxl_config_space(dev); |
1256 | 1282 | ||
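cxl_fixup_malformed_tlp() above masks two uncorrectable errors for the affected PSL revisions by read-modify-writing the AER Uncorrectable Error Mask register found through the device's AER extended capability. The same read-modify-write shape as a standalone sketch:

#include <linux/pci.h>

/* Set extra bits in a device's AER Uncorrectable Error Mask register,
 * leaving anything already masked untouched. */
static void example_mask_uncorrectable(struct pci_dev *dev, u32 extra)
{
	int aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	u32 mask;

	if (!aer)
		return;			/* no AER capability on this device */

	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
	if ((mask & extra) == extra)
		return;			/* already masked */
	mask |= extra;
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, mask);
}

A call equivalent to the hunk would pass PCI_ERR_UNC_MALF_TLP | PCI_ERR_UNC_INTN as the extra bits.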
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index 25868c2ec03e..02006f7109a8 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c | |||
@@ -592,6 +592,8 @@ int cxl_sysfs_afu_add(struct cxl_afu *afu) | |||
592 | 592 | ||
593 | /* conditionally create the add the binary file for error info buffer */ | 593 | /* conditionally create the add the binary file for error info buffer */ |
594 | if (afu->eb_len) { | 594 | if (afu->eb_len) { |
595 | sysfs_attr_init(&afu->attr_eb.attr); | ||
596 | |||
595 | afu->attr_eb.attr.name = "afu_err_buff"; | 597 | afu->attr_eb.attr.name = "afu_err_buff"; |
596 | afu->attr_eb.attr.mode = S_IRUGO; | 598 | afu->attr_eb.attr.mode = S_IRUGO; |
597 | afu->attr_eb.size = afu->eb_len; | 599 | afu->attr_eb.size = afu->eb_len; |
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index 6dd16a6d153f..94b520896b18 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c | |||
@@ -48,6 +48,12 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) | |||
48 | 48 | ||
49 | phb = pci_bus_to_host(dev->bus); | 49 | phb = pci_bus_to_host(dev->bus); |
50 | afu = (struct cxl_afu *)phb->private_data; | 50 | afu = (struct cxl_afu *)phb->private_data; |
51 | |||
52 | if (!cxl_adapter_link_ok(afu->adapter)) { | ||
53 | dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__); | ||
54 | return false; | ||
55 | } | ||
56 | |||
51 | set_dma_ops(&dev->dev, &dma_direct_ops); | 57 | set_dma_ops(&dev->dev, &dma_direct_ops); |
52 | set_dma_offset(&dev->dev, PAGE_OFFSET); | 58 | set_dma_offset(&dev->dev, PAGE_OFFSET); |
53 | 59 | ||
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index 4b469cf9e60f..8504dbeacd3b 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c | |||
@@ -204,6 +204,8 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name) | |||
204 | if (!dir) | 204 | if (!dir) |
205 | return -ENOMEM; | 205 | return -ENOMEM; |
206 | 206 | ||
207 | dev->dbgfs_dir = dir; | ||
208 | |||
207 | f = debugfs_create_file("meclients", S_IRUSR, dir, | 209 | f = debugfs_create_file("meclients", S_IRUSR, dir, |
208 | dev, &mei_dbgfs_fops_meclients); | 210 | dev, &mei_dbgfs_fops_meclients); |
209 | if (!f) { | 211 | if (!f) { |
@@ -228,7 +230,6 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name) | |||
228 | dev_err(dev->dev, "allow_fixed_address: registration failed\n"); | 230 | dev_err(dev->dev, "allow_fixed_address: registration failed\n"); |
229 | goto err; | 231 | goto err; |
230 | } | 232 | } |
231 | dev->dbgfs_dir = dir; | ||
232 | return 0; | 233 | return 0; |
233 | err: | 234 | err: |
234 | mei_dbgfs_deregister(dev); | 235 | mei_dbgfs_deregister(dev); |
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 8eec887c8f70..6d7c188fb65c 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
@@ -1209,7 +1209,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
1209 | * after the host receives the enum_resp | 1209 | * after the host receives the enum_resp |
1210 | * message clients may be added or removed | 1210 | * message clients may be added or removed |
1211 | */ | 1211 | */ |
1212 | if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS && | 1212 | if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS || |
1213 | dev->hbm_state >= MEI_HBM_STOPPED) { | 1213 | dev->hbm_state >= MEI_HBM_STOPPED) { |
1214 | dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", | 1214 | dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", |
1215 | dev->dev_state, dev->hbm_state); | 1215 | dev->dev_state, dev->hbm_state); |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 0520064dc33b..a3eb20bdcd97 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
@@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) | |||
134 | int err = cmd->error; | 134 | int err = cmd->error; |
135 | 135 | ||
136 | /* Flag re-tuning needed on CRC errors */ | 136 | /* Flag re-tuning needed on CRC errors */ |
137 | if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || | 137 | if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && |
138 | cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) && | ||
139 | (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || | ||
138 | (mrq->data && mrq->data->error == -EILSEQ) || | 140 | (mrq->data && mrq->data->error == -EILSEQ) || |
139 | (mrq->stop && mrq->stop->error == -EILSEQ)) | 141 | (mrq->stop && mrq->stop->error == -EILSEQ))) |
140 | mmc_retune_needed(host); | 142 | mmc_retune_needed(host); |
141 | 143 | ||
142 | if (err && cmd->retries && mmc_host_is_spi(host)) { | 144 | if (err && cmd->retries && mmc_host_is_spi(host)) { |
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index abd933b7029b..5466f25f0281 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c | |||
@@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host) | |||
457 | 0, &cd_gpio_invert); | 457 | 0, &cd_gpio_invert); |
458 | if (!ret) | 458 | if (!ret) |
459 | dev_info(host->parent, "Got CD GPIO\n"); | 459 | dev_info(host->parent, "Got CD GPIO\n"); |
460 | else if (ret != -ENOENT) | 460 | else if (ret != -ENOENT && ret != -ENOSYS) |
461 | return ret; | 461 | return ret; |
462 | 462 | ||
463 | /* | 463 | /* |
@@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host) | |||
481 | ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); | 481 | ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); |
482 | if (!ret) | 482 | if (!ret) |
483 | dev_info(host->parent, "Got WP GPIO\n"); | 483 | dev_info(host->parent, "Got WP GPIO\n"); |
484 | else if (ret != -ENOENT) | 484 | else if (ret != -ENOENT && ret != -ENOSYS) |
485 | return ret; | 485 | return ret; |
486 | 486 | ||
487 | if (of_property_read_bool(np, "disable-wp")) | 487 | if (of_property_read_bool(np, "disable-wp")) |
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 781e4db31767..7fb0753abe30 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
@@ -182,6 +182,7 @@ struct omap_hsmmc_host { | |||
182 | struct clk *fclk; | 182 | struct clk *fclk; |
183 | struct clk *dbclk; | 183 | struct clk *dbclk; |
184 | struct regulator *pbias; | 184 | struct regulator *pbias; |
185 | bool pbias_enabled; | ||
185 | void __iomem *base; | 186 | void __iomem *base; |
186 | int vqmmc_enabled; | 187 | int vqmmc_enabled; |
187 | resource_size_t mapbase; | 188 | resource_size_t mapbase; |
@@ -328,20 +329,22 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, | |||
328 | return ret; | 329 | return ret; |
329 | } | 330 | } |
330 | 331 | ||
331 | if (!regulator_is_enabled(host->pbias)) { | 332 | if (host->pbias_enabled == 0) { |
332 | ret = regulator_enable(host->pbias); | 333 | ret = regulator_enable(host->pbias); |
333 | if (ret) { | 334 | if (ret) { |
334 | dev_err(host->dev, "pbias reg enable fail\n"); | 335 | dev_err(host->dev, "pbias reg enable fail\n"); |
335 | return ret; | 336 | return ret; |
336 | } | 337 | } |
338 | host->pbias_enabled = 1; | ||
337 | } | 339 | } |
338 | } else { | 340 | } else { |
339 | if (regulator_is_enabled(host->pbias)) { | 341 | if (host->pbias_enabled == 1) { |
340 | ret = regulator_disable(host->pbias); | 342 | ret = regulator_disable(host->pbias); |
341 | if (ret) { | 343 | if (ret) { |
342 | dev_err(host->dev, "pbias reg disable fail\n"); | 344 | dev_err(host->dev, "pbias reg disable fail\n"); |
343 | return ret; | 345 | return ret; |
344 | } | 346 | } |
347 | host->pbias_enabled = 0; | ||
345 | } | 348 | } |
346 | } | 349 | } |
347 | 350 | ||
@@ -475,7 +478,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) | |||
475 | mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc"); | 478 | mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc"); |
476 | if (IS_ERR(mmc->supply.vmmc)) { | 479 | if (IS_ERR(mmc->supply.vmmc)) { |
477 | ret = PTR_ERR(mmc->supply.vmmc); | 480 | ret = PTR_ERR(mmc->supply.vmmc); |
478 | if (ret != -ENODEV) | 481 | if ((ret != -ENODEV) && host->dev->of_node) |
479 | return ret; | 482 | return ret; |
480 | dev_dbg(host->dev, "unable to get vmmc regulator %ld\n", | 483 | dev_dbg(host->dev, "unable to get vmmc regulator %ld\n", |
481 | PTR_ERR(mmc->supply.vmmc)); | 484 | PTR_ERR(mmc->supply.vmmc)); |
@@ -490,7 +493,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) | |||
490 | mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux"); | 493 | mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux"); |
491 | if (IS_ERR(mmc->supply.vqmmc)) { | 494 | if (IS_ERR(mmc->supply.vqmmc)) { |
492 | ret = PTR_ERR(mmc->supply.vqmmc); | 495 | ret = PTR_ERR(mmc->supply.vqmmc); |
493 | if (ret != -ENODEV) | 496 | if ((ret != -ENODEV) && host->dev->of_node) |
494 | return ret; | 497 | return ret; |
495 | dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", | 498 | dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", |
496 | PTR_ERR(mmc->supply.vqmmc)); | 499 | PTR_ERR(mmc->supply.vqmmc)); |
@@ -500,7 +503,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) | |||
500 | host->pbias = devm_regulator_get_optional(host->dev, "pbias"); | 503 | host->pbias = devm_regulator_get_optional(host->dev, "pbias"); |
501 | if (IS_ERR(host->pbias)) { | 504 | if (IS_ERR(host->pbias)) { |
502 | ret = PTR_ERR(host->pbias); | 505 | ret = PTR_ERR(host->pbias); |
503 | if (ret != -ENODEV) | 506 | if ((ret != -ENODEV) && host->dev->of_node) |
504 | return ret; | 507 | return ret; |
505 | dev_dbg(host->dev, "unable to get pbias regulator %ld\n", | 508 | dev_dbg(host->dev, "unable to get pbias regulator %ld\n", |
506 | PTR_ERR(host->pbias)); | 509 | PTR_ERR(host->pbias)); |
@@ -2053,6 +2056,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) | |||
2053 | host->base = base + pdata->reg_offset; | 2056 | host->base = base + pdata->reg_offset; |
2054 | host->power_mode = MMC_POWER_OFF; | 2057 | host->power_mode = MMC_POWER_OFF; |
2055 | host->next_data.cookie = 1; | 2058 | host->next_data.cookie = 1; |
2059 | host->pbias_enabled = 0; | ||
2056 | host->vqmmc_enabled = 0; | 2060 | host->vqmmc_enabled = 0; |
2057 | 2061 | ||
2058 | ret = omap_hsmmc_gpio_init(mmc, host, pdata); | 2062 | ret = omap_hsmmc_gpio_init(mmc, host, pdata); |
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 1420f29628c7..8cadd74e8407 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/clk.h> | 28 | #include <linux/clk.h> |
29 | #include <linux/err.h> | 29 | #include <linux/err.h> |
30 | #include <linux/mmc/host.h> | 30 | #include <linux/mmc/host.h> |
31 | #include <linux/mmc/slot-gpio.h> | ||
31 | #include <linux/io.h> | 32 | #include <linux/io.h> |
32 | #include <linux/regulator/consumer.h> | 33 | #include <linux/regulator/consumer.h> |
33 | #include <linux/gpio.h> | 34 | #include <linux/gpio.h> |
@@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc) | |||
454 | { | 455 | { |
455 | struct pxamci_host *host = mmc_priv(mmc); | 456 | struct pxamci_host *host = mmc_priv(mmc); |
456 | 457 | ||
457 | if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { | 458 | if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) |
458 | if (host->pdata->gpio_card_ro_invert) | 459 | return mmc_gpio_get_ro(mmc); |
459 | return !gpio_get_value(host->pdata->gpio_card_ro); | ||
460 | else | ||
461 | return gpio_get_value(host->pdata->gpio_card_ro); | ||
462 | } | ||
463 | if (host->pdata && host->pdata->get_ro) | 460 | if (host->pdata && host->pdata->get_ro) |
464 | return !!host->pdata->get_ro(mmc_dev(mmc)); | 461 | return !!host->pdata->get_ro(mmc_dev(mmc)); |
465 | /* | 462 | /* |
@@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) | |||
551 | 548 | ||
552 | static const struct mmc_host_ops pxamci_ops = { | 549 | static const struct mmc_host_ops pxamci_ops = { |
553 | .request = pxamci_request, | 550 | .request = pxamci_request, |
551 | .get_cd = mmc_gpio_get_cd, | ||
554 | .get_ro = pxamci_get_ro, | 552 | .get_ro = pxamci_get_ro, |
555 | .set_ios = pxamci_set_ios, | 553 | .set_ios = pxamci_set_ios, |
556 | .enable_sdio_irq = pxamci_enable_sdio_irq, | 554 | .enable_sdio_irq = pxamci_enable_sdio_irq, |
@@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev) | |||
790 | gpio_power = host->pdata->gpio_power; | 788 | gpio_power = host->pdata->gpio_power; |
791 | } | 789 | } |
792 | if (gpio_is_valid(gpio_power)) { | 790 | if (gpio_is_valid(gpio_power)) { |
793 | ret = gpio_request(gpio_power, "mmc card power"); | 791 | ret = devm_gpio_request(&pdev->dev, gpio_power, |
792 | "mmc card power"); | ||
794 | if (ret) { | 793 | if (ret) { |
795 | dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); | 794 | dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", |
795 | gpio_power); | ||
796 | goto out; | 796 | goto out; |
797 | } | 797 | } |
798 | gpio_direction_output(gpio_power, | 798 | gpio_direction_output(gpio_power, |
799 | host->pdata->gpio_power_invert); | 799 | host->pdata->gpio_power_invert); |
800 | } | 800 | } |
801 | if (gpio_is_valid(gpio_ro)) { | 801 | if (gpio_is_valid(gpio_ro)) |
802 | ret = gpio_request(gpio_ro, "mmc card read only"); | 802 | ret = mmc_gpio_request_ro(mmc, gpio_ro); |
803 | if (ret) { | 803 | if (ret) { |
804 | dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); | 804 | dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); |
805 | goto err_gpio_ro; | 805 | goto out; |
806 | } | 806 | } else { |
807 | gpio_direction_input(gpio_ro); | 807 | mmc->caps |= host->pdata->gpio_card_ro_invert ? |
808 | MMC_CAP2_RO_ACTIVE_HIGH : 0; | ||
808 | } | 809 | } |
809 | if (gpio_is_valid(gpio_cd)) { | ||
810 | ret = gpio_request(gpio_cd, "mmc card detect"); | ||
811 | if (ret) { | ||
812 | dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); | ||
813 | goto err_gpio_cd; | ||
814 | } | ||
815 | gpio_direction_input(gpio_cd); | ||
816 | 810 | ||
817 | ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, | 811 | if (gpio_is_valid(gpio_cd)) |
818 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | 812 | ret = mmc_gpio_request_cd(mmc, gpio_cd, 0); |
819 | "mmc card detect", mmc); | 813 | if (ret) { |
820 | if (ret) { | 814 | dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); |
821 | dev_err(&pdev->dev, "failed to request card detect IRQ\n"); | 815 | goto out; |
822 | goto err_request_irq; | ||
823 | } | ||
824 | } | 816 | } |
825 | 817 | ||
826 | if (host->pdata && host->pdata->init) | 818 | if (host->pdata && host->pdata->init) |
@@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev) | |||
835 | 827 | ||
836 | return 0; | 828 | return 0; |
837 | 829 | ||
838 | err_request_irq: | 830 | out: |
839 | gpio_free(gpio_cd); | ||
840 | err_gpio_cd: | ||
841 | gpio_free(gpio_ro); | ||
842 | err_gpio_ro: | ||
843 | gpio_free(gpio_power); | ||
844 | out: | ||
845 | if (host) { | 831 | if (host) { |
846 | if (host->dma_chan_rx) | 832 | if (host->dma_chan_rx) |
847 | dma_release_channel(host->dma_chan_rx); | 833 | dma_release_channel(host->dma_chan_rx); |
@@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev) | |||
873 | gpio_ro = host->pdata->gpio_card_ro; | 859 | gpio_ro = host->pdata->gpio_card_ro; |
874 | gpio_power = host->pdata->gpio_power; | 860 | gpio_power = host->pdata->gpio_power; |
875 | } | 861 | } |
876 | if (gpio_is_valid(gpio_cd)) { | ||
877 | free_irq(gpio_to_irq(gpio_cd), mmc); | ||
878 | gpio_free(gpio_cd); | ||
879 | } | ||
880 | if (gpio_is_valid(gpio_ro)) | ||
881 | gpio_free(gpio_ro); | ||
882 | if (gpio_is_valid(gpio_power)) | ||
883 | gpio_free(gpio_power); | ||
884 | if (host->vcc) | 862 | if (host->vcc) |
885 | regulator_put(host->vcc); | 863 | regulator_put(host->vcc); |
886 | 864 | ||
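The pxamci conversion replaces hand-rolled gpio_request()/request_irq() handling of the card-detect and write-protect pins with the mmc slot-gpio helpers, and drops the matching error labels and remove()-time teardown, which suggests the slot-gpio core now owns those GPIOs' lifetime. A minimal sketch of the helper usage from a host driver's probe, assuming platform-provided GPIO numbers:

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int example_request_slot_gpios(struct mmc_host *mmc, int gpio_cd,
				      int gpio_ro, bool ro_active_high)
{
	int ret;

	ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);	/* 0 = no extra debounce */
	if (ret)
		return ret;

	ret = mmc_gpio_request_ro(mmc, gpio_ro);
	if (ret)
		return ret;

	if (ro_active_high)
		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;	/* caps2 flag for RO polarity */
	return 0;
}

The matching mmc_host_ops then point .get_cd at mmc_gpio_get_cd, or call mmc_gpio_get_ro() from the driver's own get_ro, as the pxamci_ops hunk shows.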
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index d1556643a41d..a0f05de5409f 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c | |||
@@ -43,6 +43,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = { | |||
43 | 43 | ||
44 | static const struct sdhci_pltfm_data soc_data_sama5d2 = { | 44 | static const struct sdhci_pltfm_data soc_data_sama5d2 = { |
45 | .ops = &sdhci_at91_sama5d2_ops, | 45 | .ops = &sdhci_at91_sama5d2_ops, |
46 | .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST, | ||
46 | }; | 47 | }; |
47 | 48 | ||
48 | static const struct of_device_id sdhci_at91_dt_match[] = { | 49 | static const struct of_device_id sdhci_at91_dt_match[] = { |
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 946d37f94a31..f5edf9d3a18a 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c | |||
@@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev, | |||
135 | struct sdhci_pxa *pxa = pltfm_host->priv; | 135 | struct sdhci_pxa *pxa = pltfm_host->priv; |
136 | struct resource *res; | 136 | struct resource *res; |
137 | 137 | ||
138 | host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; | ||
138 | host->quirks |= SDHCI_QUIRK_MISSING_CAPS; | 139 | host->quirks |= SDHCI_QUIRK_MISSING_CAPS; |
139 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 140 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
140 | "conf-sdio3"); | 141 | "conf-sdio3"); |
@@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) | |||
290 | uhs == MMC_TIMING_UHS_DDR50) { | 291 | uhs == MMC_TIMING_UHS_DDR50) { |
291 | reg_val &= ~SDIO3_CONF_CLK_INV; | 292 | reg_val &= ~SDIO3_CONF_CLK_INV; |
292 | reg_val |= SDIO3_CONF_SD_FB_CLK; | 293 | reg_val |= SDIO3_CONF_SD_FB_CLK; |
294 | } else if (uhs == MMC_TIMING_MMC_HS) { | ||
295 | reg_val &= ~SDIO3_CONF_CLK_INV; | ||
296 | reg_val &= ~SDIO3_CONF_SD_FB_CLK; | ||
293 | } else { | 297 | } else { |
294 | reg_val |= SDIO3_CONF_CLK_INV; | 298 | reg_val |= SDIO3_CONF_CLK_INV; |
295 | reg_val &= ~SDIO3_CONF_SD_FB_CLK; | 299 | reg_val &= ~SDIO3_CONF_SD_FB_CLK; |
@@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) | |||
398 | if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { | 402 | if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { |
399 | ret = armada_38x_quirks(pdev, host); | 403 | ret = armada_38x_quirks(pdev, host); |
400 | if (ret < 0) | 404 | if (ret < 0) |
401 | goto err_clk_get; | 405 | goto err_mbus_win; |
402 | ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); | 406 | ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); |
403 | if (ret < 0) | 407 | if (ret < 0) |
404 | goto err_mbus_win; | 408 | goto err_mbus_win; |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 64b7fdbd1a9c..fbc7efdddcb5 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -1160,6 +1160,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | |||
1160 | host->mmc->actual_clock = 0; | 1160 | host->mmc->actual_clock = 0; |
1161 | 1161 | ||
1162 | sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); | 1162 | sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); |
1163 | if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST) | ||
1164 | mdelay(1); | ||
1163 | 1165 | ||
1164 | if (clock == 0) | 1166 | if (clock == 0) |
1165 | return; | 1167 | return; |
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 7c02ff46c8ac..9d4aa31b683a 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -412,6 +412,11 @@ struct sdhci_host { | |||
412 | #define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) | 412 | #define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) |
413 | /* Broken Clock divider zero in controller */ | 413 | /* Broken Clock divider zero in controller */ |
414 | #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) | 414 | #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) |
415 | /* | ||
416 | * When internal clock is disabled, a delay is needed before modifying the | ||
417 | * SD clock frequency or enabling back the internal clock. | ||
418 | */ | ||
419 | #define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16) | ||
415 | 420 | ||
416 | int irq; /* Device IRQ */ | 421 | int irq; /* Device IRQ */ |
417 | void __iomem *ioaddr; /* Mapped address */ | 422 | void __iomem *ioaddr; /* Mapped address */ |
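
SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST ties the surrounding hunks together: the at91 sama5d2 platform data opts in, and sdhci_set_clock() honours the flag by waiting 1 ms after the internal clock is switched off before reprogramming the divider. Sketched end to end, with placeholder names for everything except the quirk itself:

#include <linux/delay.h>
#include "sdhci-pltfm.h"	/* driver-local header providing sdhci_pltfm_data */

/* Assumed placeholder ops; a real driver fills in its callbacks. */
static const struct sdhci_ops example_ops;

static const struct sdhci_pltfm_data example_pltfm_data = {
	.ops	 = &example_ops,
	.quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST,
};

static void example_set_clock(struct sdhci_host *host, unsigned int clock)
{
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
	if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
		mdelay(1);		/* let the controller settle */
	/* ... program the divider and re-enable the clock ... */
}
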
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index a7b7a6771598..b981b8552e43 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
@@ -210,6 +210,16 @@ | |||
210 | #define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ | 210 | #define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ |
211 | #define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ | 211 | #define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ |
212 | 212 | ||
213 | #define SDXC_CLK_400K 0 | ||
214 | #define SDXC_CLK_25M 1 | ||
215 | #define SDXC_CLK_50M 2 | ||
216 | #define SDXC_CLK_50M_DDR 3 | ||
217 | |||
218 | struct sunxi_mmc_clk_delay { | ||
219 | u32 output; | ||
220 | u32 sample; | ||
221 | }; | ||
222 | |||
213 | struct sunxi_idma_des { | 223 | struct sunxi_idma_des { |
214 | u32 config; | 224 | u32 config; |
215 | u32 buf_size; | 225 | u32 buf_size; |
@@ -229,6 +239,7 @@ struct sunxi_mmc_host { | |||
229 | struct clk *clk_mmc; | 239 | struct clk *clk_mmc; |
230 | struct clk *clk_sample; | 240 | struct clk *clk_sample; |
231 | struct clk *clk_output; | 241 | struct clk *clk_output; |
242 | const struct sunxi_mmc_clk_delay *clk_delays; | ||
232 | 243 | ||
233 | /* irq */ | 244 | /* irq */ |
234 | spinlock_t lock; | 245 | spinlock_t lock; |
@@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, | |||
654 | 665 | ||
655 | /* determine delays */ | 666 | /* determine delays */ |
656 | if (rate <= 400000) { | 667 | if (rate <= 400000) { |
657 | oclk_dly = 180; | 668 | oclk_dly = host->clk_delays[SDXC_CLK_400K].output; |
658 | sclk_dly = 42; | 669 | sclk_dly = host->clk_delays[SDXC_CLK_400K].sample; |
659 | } else if (rate <= 25000000) { | 670 | } else if (rate <= 25000000) { |
660 | oclk_dly = 180; | 671 | oclk_dly = host->clk_delays[SDXC_CLK_25M].output; |
661 | sclk_dly = 75; | 672 | sclk_dly = host->clk_delays[SDXC_CLK_25M].sample; |
662 | } else if (rate <= 50000000) { | 673 | } else if (rate <= 50000000) { |
663 | if (ios->timing == MMC_TIMING_UHS_DDR50) { | 674 | if (ios->timing == MMC_TIMING_UHS_DDR50) { |
664 | oclk_dly = 60; | 675 | oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output; |
665 | sclk_dly = 120; | 676 | sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample; |
666 | } else { | 677 | } else { |
667 | oclk_dly = 90; | 678 | oclk_dly = host->clk_delays[SDXC_CLK_50M].output; |
668 | sclk_dly = 150; | 679 | sclk_dly = host->clk_delays[SDXC_CLK_50M].sample; |
669 | } | 680 | } |
670 | } else if (rate <= 100000000) { | ||
671 | oclk_dly = 6; | ||
672 | sclk_dly = 24; | ||
673 | } else if (rate <= 200000000) { | ||
674 | oclk_dly = 3; | ||
675 | sclk_dly = 12; | ||
676 | } else { | 681 | } else { |
677 | return -EINVAL; | 682 | return -EINVAL; |
678 | } | 683 | } |
@@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
871 | static const struct of_device_id sunxi_mmc_of_match[] = { | 876 | static const struct of_device_id sunxi_mmc_of_match[] = { |
872 | { .compatible = "allwinner,sun4i-a10-mmc", }, | 877 | { .compatible = "allwinner,sun4i-a10-mmc", }, |
873 | { .compatible = "allwinner,sun5i-a13-mmc", }, | 878 | { .compatible = "allwinner,sun5i-a13-mmc", }, |
879 | { .compatible = "allwinner,sun9i-a80-mmc", }, | ||
874 | { /* sentinel */ } | 880 | { /* sentinel */ } |
875 | }; | 881 | }; |
876 | MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); | 882 | MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); |
@@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = { | |||
884 | .hw_reset = sunxi_mmc_hw_reset, | 890 | .hw_reset = sunxi_mmc_hw_reset, |
885 | }; | 891 | }; |
886 | 892 | ||
893 | static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = { | ||
894 | [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, | ||
895 | [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, | ||
896 | [SDXC_CLK_50M] = { .output = 90, .sample = 120 }, | ||
897 | [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 }, | ||
898 | }; | ||
899 | |||
900 | static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { | ||
901 | [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, | ||
902 | [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, | ||
903 | [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, | ||
904 | [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, | ||
905 | }; | ||
906 | |||
887 | static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, | 907 | static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, |
888 | struct platform_device *pdev) | 908 | struct platform_device *pdev) |
889 | { | 909 | { |
@@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, | |||
895 | else | 915 | else |
896 | host->idma_des_size_bits = 16; | 916 | host->idma_des_size_bits = 16; |
897 | 917 | ||
918 | if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc")) | ||
919 | host->clk_delays = sun9i_mmc_clk_delays; | ||
920 | else | ||
921 | host->clk_delays = sunxi_mmc_clk_delays; | ||
922 | |||
898 | ret = mmc_regulator_get_supply(host->mmc); | 923 | ret = mmc_regulator_get_supply(host->mmc); |
899 | if (ret) { | 924 | if (ret) { |
900 | if (ret != -EPROBE_DEFER) | 925 | if (ret != -EPROBE_DEFER) |
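
The sunxi-mmc change replaces hard-coded output/sample delays with per-SoC tables selected by compatible string, so the sun9i A80 can use different values from earlier SoCs. A minimal sketch of the lookup, reusing the rate buckets from the hunk (the helper name and struct name are assumptions):

struct example_clk_delay {
	u32 output;
	u32 sample;
};

static int example_lookup_delays(const struct example_clk_delay *tbl,
				 u32 rate, bool ddr,
				 u32 *oclk_dly, u32 *sclk_dly)
{
	int idx;

	if (rate <= 400000)
		idx = SDXC_CLK_400K;
	else if (rate <= 25000000)
		idx = SDXC_CLK_25M;
	else if (rate <= 50000000)
		idx = ddr ? SDXC_CLK_50M_DDR : SDXC_CLK_50M;
	else
		return -EINVAL;		/* the >50 MHz buckets were dropped */

	*oclk_dly = tbl[idx].output;
	*sclk_dly = tbl[idx].sample;
	return 0;
}
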
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 2426db88db36..f04445b992f5 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
@@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom) | |||
879 | oob_chunk_size); | 879 | oob_chunk_size); |
880 | 880 | ||
881 | /* the last chunk */ | 881 | /* the last chunk */ |
882 | memcpy16_toio(&s[oob_chunk_size * sparebuf_size], | 882 | memcpy16_toio(&s[i * sparebuf_size], |
883 | &d[i * oob_chunk_size], | 883 | &d[i * oob_chunk_size], |
884 | host->used_oobsize - i * oob_chunk_size); | 884 | host->used_oobsize - i * oob_chunk_size); |
885 | } | 885 | } |
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index f97a58d6aae1..e7d333c162be 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c | |||
@@ -147,6 +147,10 @@ | |||
147 | #define NFC_ECC_MODE GENMASK(15, 12) | 147 | #define NFC_ECC_MODE GENMASK(15, 12) |
148 | #define NFC_RANDOM_SEED GENMASK(30, 16) | 148 | #define NFC_RANDOM_SEED GENMASK(30, 16) |
149 | 149 | ||
150 | /* NFC_USER_DATA helper macros */ | ||
151 | #define NFC_BUF_TO_USER_DATA(buf) ((buf)[0] | ((buf)[1] << 8) | \ | ||
152 | ((buf)[2] << 16) | ((buf)[3] << 24)) | ||
153 | |||
150 | #define NFC_DEFAULT_TIMEOUT_MS 1000 | 154 | #define NFC_DEFAULT_TIMEOUT_MS 1000 |
151 | 155 | ||
152 | #define NFC_SRAM_SIZE 1024 | 156 | #define NFC_SRAM_SIZE 1024 |
@@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, | |||
646 | offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize; | 650 | offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize; |
647 | 651 | ||
648 | /* Fill OOB data in */ | 652 | /* Fill OOB data in */ |
649 | if (oob_required) { | 653 | writel(NFC_BUF_TO_USER_DATA(chip->oob_poi + |
650 | tmp = 0xffffffff; | 654 | layout->oobfree[i].offset), |
651 | memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, | 655 | nfc->regs + NFC_REG_USER_DATA_BASE); |
652 | 4); | ||
653 | } else { | ||
654 | memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, | ||
655 | chip->oob_poi + offset - mtd->writesize, | ||
656 | 4); | ||
657 | } | ||
658 | 656 | ||
659 | chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1); | 657 | chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1); |
660 | 658 | ||
@@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd, | |||
784 | offset += ecc->size; | 782 | offset += ecc->size; |
785 | 783 | ||
786 | /* Fill OOB data in */ | 784 | /* Fill OOB data in */ |
787 | if (oob_required) { | 785 | writel(NFC_BUF_TO_USER_DATA(oob), |
788 | tmp = 0xffffffff; | 786 | nfc->regs + NFC_REG_USER_DATA_BASE); |
789 | memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, | ||
790 | 4); | ||
791 | } else { | ||
792 | memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob, | ||
793 | 4); | ||
794 | } | ||
795 | 787 | ||
796 | tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | | 788 | tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | |
797 | (1 << 30); | 789 | (1 << 30); |
@@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) | |||
1389 | node); | 1381 | node); |
1390 | nand_release(&chip->mtd); | 1382 | nand_release(&chip->mtd); |
1391 | sunxi_nand_ecc_cleanup(&chip->nand.ecc); | 1383 | sunxi_nand_ecc_cleanup(&chip->nand.ecc); |
1384 | list_del(&chip->node); | ||
1392 | } | 1385 | } |
1393 | } | 1386 | } |
1394 | 1387 | ||
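
NFC_BUF_TO_USER_DATA packs the first four OOB bytes into the little-endian word the controller expects in its USER_DATA register, replacing the two conditional memcpy_toio() paths. Written out as a helper (a sketch, not the driver's code):

#include <linux/io.h>
#include <linux/types.h>

/* Pack four OOB bytes, LSB first, exactly as the macro above does. */
static inline u32 example_buf_to_user_data(const u8 *buf)
{
	return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
}

/* usage: writel(example_buf_to_user_data(oob), regs + user_data_offset); */
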
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 5bbd1f094f4e..1fc23e48fe8e 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c | |||
@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi, | |||
926 | goto bad; | 926 | goto bad; |
927 | } | 927 | } |
928 | 928 | ||
929 | if (data_size > ubi->leb_size) { | ||
930 | ubi_err(ubi, "bad data_size"); | ||
931 | goto bad; | ||
932 | } | ||
933 | |||
929 | if (vol_type == UBI_VID_STATIC) { | 934 | if (vol_type == UBI_VID_STATIC) { |
930 | /* | 935 | /* |
931 | * Although from high-level point of view static volumes may | 936 | * Although from high-level point of view static volumes may |
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 80bdd5b88bac..d85c19762160 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi, | |||
649 | if (ubi->corr_peb_count) | 649 | if (ubi->corr_peb_count) |
650 | ubi_err(ubi, "%d PEBs are corrupted and not used", | 650 | ubi_err(ubi, "%d PEBs are corrupted and not used", |
651 | ubi->corr_peb_count); | 651 | ubi->corr_peb_count); |
652 | return -ENOSPC; | ||
652 | } | 653 | } |
653 | ubi->rsvd_pebs += reserved_pebs; | 654 | ubi->rsvd_pebs += reserved_pebs; |
654 | ubi->avail_pebs -= reserved_pebs; | 655 | ubi->avail_pebs -= reserved_pebs; |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 275d9fb6fe5c..eb4489f9082f 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1601 | if (ubi->corr_peb_count) | 1601 | if (ubi->corr_peb_count) |
1602 | ubi_err(ubi, "%d PEBs are corrupted and not used", | 1602 | ubi_err(ubi, "%d PEBs are corrupted and not used", |
1603 | ubi->corr_peb_count); | 1603 | ubi->corr_peb_count); |
1604 | err = -ENOSPC; | ||
1604 | goto out_free; | 1605 | goto out_free; |
1605 | } | 1606 | } |
1606 | ubi->avail_pebs -= reserved_pebs; | 1607 | ubi->avail_pebs -= reserved_pebs; |
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 10f71c732b59..816d0e94961c 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c | |||
@@ -326,7 +326,7 @@ static void arcdev_setup(struct net_device *dev) | |||
326 | dev->type = ARPHRD_ARCNET; | 326 | dev->type = ARPHRD_ARCNET; |
327 | dev->netdev_ops = &arcnet_netdev_ops; | 327 | dev->netdev_ops = &arcnet_netdev_ops; |
328 | dev->header_ops = &arcnet_header_ops; | 328 | dev->header_ops = &arcnet_header_ops; |
329 | dev->hard_header_len = sizeof(struct archdr); | 329 | dev->hard_header_len = sizeof(struct arc_hardware); |
330 | dev->mtu = choose_mtu(); | 330 | dev->mtu = choose_mtu(); |
331 | 331 | ||
332 | dev->addr_len = ARCNET_ALEN; | 332 | dev->addr_len = ARCNET_ALEN; |
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 6f13f7206762..1f7dd927cc5e 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c | |||
@@ -2000,6 +2000,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) | |||
2000 | */ | 2000 | */ |
2001 | reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); | 2001 | reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); |
2002 | if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { | 2002 | if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { |
2003 | reg &= ~PORT_PCS_CTRL_UNFORCED; | ||
2003 | reg |= PORT_PCS_CTRL_FORCE_LINK | | 2004 | reg |= PORT_PCS_CTRL_FORCE_LINK | |
2004 | PORT_PCS_CTRL_LINK_UP | | 2005 | PORT_PCS_CTRL_LINK_UP | |
2005 | PORT_PCS_CTRL_DUPLEX_FULL | | 2006 | PORT_PCS_CTRL_DUPLEX_FULL | |
@@ -2050,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) | |||
2050 | reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; | 2051 | reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; |
2051 | else | 2052 | else |
2052 | reg |= PORT_CONTROL_FRAME_MODE_DSA; | 2053 | reg |= PORT_CONTROL_FRAME_MODE_DSA; |
2054 | reg |= PORT_CONTROL_FORWARD_UNKNOWN | | ||
2055 | PORT_CONTROL_FORWARD_UNKNOWN_MC; | ||
2053 | } | 2056 | } |
2054 | 2057 | ||
2055 | if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || | 2058 | if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index cfa37041ab71..c4bb8027b3fb 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -689,16 +689,24 @@ static int xgene_enet_phy_connect(struct net_device *ndev) | |||
689 | netdev_dbg(ndev, "No phy-handle found in DT\n"); | 689 | netdev_dbg(ndev, "No phy-handle found in DT\n"); |
690 | return -ENODEV; | 690 | return -ENODEV; |
691 | } | 691 | } |
692 | pdata->phy_dev = of_phy_find_device(phy_np); | ||
693 | } | ||
694 | 692 | ||
695 | phy_dev = pdata->phy_dev; | 693 | phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, |
694 | 0, pdata->phy_mode); | ||
695 | if (!phy_dev) { | ||
696 | netdev_err(ndev, "Could not connect to PHY\n"); | ||
697 | return -ENODEV; | ||
698 | } | ||
699 | |||
700 | pdata->phy_dev = phy_dev; | ||
701 | } else { | ||
702 | phy_dev = pdata->phy_dev; | ||
696 | 703 | ||
697 | if (!phy_dev || | 704 | if (!phy_dev || |
698 | phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, | 705 | phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, |
699 | pdata->phy_mode)) { | 706 | pdata->phy_mode)) { |
700 | netdev_err(ndev, "Could not connect to PHY\n"); | 707 | netdev_err(ndev, "Could not connect to PHY\n"); |
701 | return -ENODEV; | 708 | return -ENODEV; |
709 | } | ||
702 | } | 710 | } |
703 | 711 | ||
704 | pdata->phy_speed = SPEED_UNKNOWN; | 712 | pdata->phy_speed = SPEED_UNKNOWN; |
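
For the device-tree case the xgene driver now goes through of_phy_connect(), which looks up the PHY node, attaches it to the net_device and registers the link-change handler in one step. A hedged sketch of that call pattern, with illustrative names:

#include <linux/of_mdio.h>
#include <linux/phy.h>

static struct phy_device *example_connect_phy(struct net_device *ndev,
					      struct device_node *phy_np,
					      void (*adjust_link)(struct net_device *),
					      phy_interface_t mode)
{
	struct phy_device *phy;

	phy = of_phy_connect(ndev, phy_np, adjust_link, 0 /* flags */, mode);
	if (!phy)
		return NULL;	/* caller reports -ENODEV */

	return phy;		/* disconnect with phy_disconnect() on close */
}
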
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c index f9cb99bfb511..ffd180570920 100644 --- a/drivers/net/ethernet/arc/emac_arc.c +++ b/drivers/net/ethernet/arc/emac_arc.c | |||
@@ -78,6 +78,7 @@ static const struct of_device_id emac_arc_dt_ids[] = { | |||
78 | { .compatible = "snps,arc-emac" }, | 78 | { .compatible = "snps,arc-emac" }, |
79 | { /* Sentinel */ } | 79 | { /* Sentinel */ } |
80 | }; | 80 | }; |
81 | MODULE_DEVICE_TABLE(of, emac_arc_dt_ids); | ||
81 | 82 | ||
82 | static struct platform_driver emac_arc_driver = { | 83 | static struct platform_driver emac_arc_driver = { |
83 | .probe = emac_arc_probe, | 84 | .probe = emac_arc_probe, |
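
Several drivers in this series (emac_arc here, and bcmsysport, bcmgenet, gianfar_ptp, ks8851 and moxart further down) gain the same one-liner: exporting the OF match table with MODULE_DEVICE_TABLE() so the module alias is emitted and udev can autoload the driver when a matching DT node is present. The shape of the pattern, with placeholder names:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-device" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_dt_ids);
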
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index b9a5a97ed4dd..f1b5364f3521 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -2079,6 +2079,7 @@ static const struct of_device_id bcm_sysport_of_match[] = { | |||
2079 | { .compatible = "brcm,systemport" }, | 2079 | { .compatible = "brcm,systemport" }, |
2080 | { /* sentinel */ } | 2080 | { /* sentinel */ } |
2081 | }; | 2081 | }; |
2082 | MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); | ||
2082 | 2083 | ||
2083 | static struct platform_driver bcm_sysport_driver = { | 2084 | static struct platform_driver bcm_sysport_driver = { |
2084 | .probe = bcm_sysport_probe, | 2085 | .probe = bcm_sysport_probe, |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index ba936635322a..b5e64b02200c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -1946,6 +1946,7 @@ struct bnx2x { | |||
1946 | u16 vlan_cnt; | 1946 | u16 vlan_cnt; |
1947 | u16 vlan_credit; | 1947 | u16 vlan_credit; |
1948 | u16 vxlan_dst_port; | 1948 | u16 vxlan_dst_port; |
1949 | u8 vxlan_dst_port_count; | ||
1949 | bool accept_any_vlan; | 1950 | bool accept_any_vlan; |
1950 | }; | 1951 | }; |
1951 | 1952 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e3da2bddf143..f1d62d5dbaff 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -3705,16 +3705,14 @@ out: | |||
3705 | 3705 | ||
3706 | void bnx2x_update_mfw_dump(struct bnx2x *bp) | 3706 | void bnx2x_update_mfw_dump(struct bnx2x *bp) |
3707 | { | 3707 | { |
3708 | struct timeval epoc; | ||
3709 | u32 drv_ver; | 3708 | u32 drv_ver; |
3710 | u32 valid_dump; | 3709 | u32 valid_dump; |
3711 | 3710 | ||
3712 | if (!SHMEM2_HAS(bp, drv_info)) | 3711 | if (!SHMEM2_HAS(bp, drv_info)) |
3713 | return; | 3712 | return; |
3714 | 3713 | ||
3715 | /* Update Driver load time */ | 3714 | /* Update Driver load time, possibly broken in y2038 */ |
3716 | do_gettimeofday(&epoc); | 3715 | SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds()); |
3717 | SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec); | ||
3718 | 3716 | ||
3719 | drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); | 3717 | drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); |
3720 | SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); | 3718 | SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); |
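
The bnx2x timestamp update switches from do_gettimeofday(), whose tv_sec overflows a 32-bit long in 2038, to ktime_get_real_seconds(), which returns time64_t; the truncation to the 32-bit shared-memory field becomes an explicit cast. Roughly:

#include <linux/timekeeping.h>
#include <linux/types.h>

/* Sketch: the field written to firmware shared memory is 32 bits wide,
 * so the 64-bit seconds value is deliberately truncated on write. */
static u32 example_driver_load_time(void)
{
	return (u32)ktime_get_real_seconds();
}
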
@@ -10110,12 +10108,18 @@ static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port) | |||
10110 | if (!netif_running(bp->dev)) | 10108 | if (!netif_running(bp->dev)) |
10111 | return; | 10109 | return; |
10112 | 10110 | ||
10113 | if (bp->vxlan_dst_port || !IS_PF(bp)) { | 10111 | if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) { |
10112 | bp->vxlan_dst_port_count++; | ||
10113 | return; | ||
10114 | } | ||
10115 | |||
10116 | if (bp->vxlan_dst_port_count || !IS_PF(bp)) { | ||
10114 | DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n"); | 10117 | DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n"); |
10115 | return; | 10118 | return; |
10116 | } | 10119 | } |
10117 | 10120 | ||
10118 | bp->vxlan_dst_port = port; | 10121 | bp->vxlan_dst_port = port; |
10122 | bp->vxlan_dst_port_count = 1; | ||
10119 | bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); | 10123 | bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); |
10120 | } | 10124 | } |
10121 | 10125 | ||
@@ -10130,10 +10134,14 @@ static void bnx2x_add_vxlan_port(struct net_device *netdev, | |||
10130 | 10134 | ||
10131 | static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) | 10135 | static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) |
10132 | { | 10136 | { |
10133 | if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) { | 10137 | if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port || |
10138 | !IS_PF(bp)) { | ||
10134 | DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); | 10139 | DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); |
10135 | return; | 10140 | return; |
10136 | } | 10141 | } |
10142 | bp->vxlan_dst_port--; | ||
10143 | if (bp->vxlan_dst_port) | ||
10144 | return; | ||
10137 | 10145 | ||
10138 | if (netif_running(bp->dev)) { | 10146 | if (netif_running(bp->dev)) { |
10139 | bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); | 10147 | bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); |
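
The vxlan add/del handlers now reference-count the offloaded UDP port, so adding the same port again only bumps a counter and the hardware entry is torn down only when the last user deletes it. A compact sketch of that accounting, with placeholder names and the counter decremented on delete:

struct example_vxlan_state {
	u16 port;
	u8  count;
};

static void example_add_vxlan_port(struct example_vxlan_state *s, u16 port)
{
	if (s->count && s->port == port) {
		s->count++;			/* same port added again */
		return;
	}
	if (s->count)
		return;				/* only one offloaded port */

	s->port = port;
	s->count = 1;
	/* ... schedule the hardware add ... */
}

static void example_del_vxlan_port(struct example_vxlan_state *s, u16 port)
{
	if (!s->count || s->port != port)
		return;				/* not the offloaded port */
	if (--s->count)
		return;				/* still in use elsewhere */

	/* ... schedule the hardware delete ... */
}
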
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index c9bd7f16018e..ff702a707a91 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | |||
@@ -4319,8 +4319,16 @@ static int bnx2x_setup_rss(struct bnx2x *bp, | |||
4319 | 4319 | ||
4320 | /* RSS keys */ | 4320 | /* RSS keys */ |
4321 | if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { | 4321 | if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { |
4322 | memcpy(&data->rss_key[0], &p->rss_key[0], | 4322 | u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key); |
4323 | sizeof(data->rss_key)); | 4323 | const u8 *src = (const u8 *)p->rss_key; |
4324 | int i; | ||
4325 | |||
4326 | /* Apparently, bnx2x reads this array in reverse order | ||
4327 | * We need to byte swap rss_key to comply with Toeplitz specs. | ||
4328 | */ | ||
4329 | for (i = 0; i < sizeof(data->rss_key); i++) | ||
4330 | *--dst = *src++; | ||
4331 | |||
4324 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; | 4332 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; |
4325 | } | 4333 | } |
4326 | 4334 | ||
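
The RSS key is now copied into the ramrod buffer back to front, because the device consumes the array in reverse order and would otherwise apply a byte-swapped Toeplitz key. The copy reduces to a reversed memcpy:

#include <linux/types.h>

/* Reverse-order copy: dst ends up holding src's bytes back to front. */
static void example_copy_key_reversed(u8 *dst, const u8 *src, size_t len)
{
	u8 *p = dst + len;

	while (len--)
		*--p = *src++;
}
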
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fadbd0088d3e..3bc701e4c59e 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
@@ -3155,6 +3155,7 @@ static const struct of_device_id bcmgenet_match[] = { | |||
3155 | { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, | 3155 | { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, |
3156 | { }, | 3156 | { }, |
3157 | }; | 3157 | }; |
3158 | MODULE_DEVICE_TABLE(of, bcmgenet_match); | ||
3158 | 3159 | ||
3159 | static int bcmgenet_probe(struct platform_device *pdev) | 3160 | static int bcmgenet_probe(struct platform_device *pdev) |
3160 | { | 3161 | { |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index b7a0f7879de2..9e59663a6ead 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c | |||
@@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar) | |||
1543 | } | 1543 | } |
1544 | 1544 | ||
1545 | /* Flush FLI data fifo. */ | 1545 | /* Flush FLI data fifo. */ |
1546 | static u32 | 1546 | static int |
1547 | bfa_flash_fifo_flush(void __iomem *pci_bar) | 1547 | bfa_flash_fifo_flush(void __iomem *pci_bar) |
1548 | { | 1548 | { |
1549 | u32 i; | 1549 | u32 i; |
@@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar) | |||
1573 | } | 1573 | } |
1574 | 1574 | ||
1575 | /* Read flash status. */ | 1575 | /* Read flash status. */ |
1576 | static u32 | 1576 | static int |
1577 | bfa_flash_status_read(void __iomem *pci_bar) | 1577 | bfa_flash_status_read(void __iomem *pci_bar) |
1578 | { | 1578 | { |
1579 | union bfa_flash_dev_status_reg dev_status; | 1579 | union bfa_flash_dev_status_reg dev_status; |
1580 | u32 status; | 1580 | int status; |
1581 | u32 ret_status; | 1581 | u32 ret_status; |
1582 | int i; | 1582 | int i; |
1583 | 1583 | ||
@@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar) | |||
1611 | } | 1611 | } |
1612 | 1612 | ||
1613 | /* Start flash read operation. */ | 1613 | /* Start flash read operation. */ |
1614 | static u32 | 1614 | static int |
1615 | bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, | 1615 | bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, |
1616 | char *buf) | 1616 | char *buf) |
1617 | { | 1617 | { |
1618 | u32 status; | 1618 | int status; |
1619 | 1619 | ||
1620 | /* len must be mutiple of 4 and not exceeding fifo size */ | 1620 | /* len must be mutiple of 4 and not exceeding fifo size */ |
1621 | if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) | 1621 | if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) |
@@ -1703,7 +1703,8 @@ static enum bfa_status | |||
1703 | bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, | 1703 | bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, |
1704 | u32 len) | 1704 | u32 len) |
1705 | { | 1705 | { |
1706 | u32 n, status; | 1706 | u32 n; |
1707 | int status; | ||
1707 | u32 off, l, s, residue, fifo_sz; | 1708 | u32 off, l, s, residue, fifo_sz; |
1708 | 1709 | ||
1709 | residue = len; | 1710 | residue = len; |
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 5d0753cc7e73..04b0d16b210e 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c | |||
@@ -2400,6 +2400,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, | |||
2400 | q0->rcb->id = 0; | 2400 | q0->rcb->id = 0; |
2401 | q0->rx_packets = q0->rx_bytes = 0; | 2401 | q0->rx_packets = q0->rx_bytes = 0; |
2402 | q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; | 2402 | q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; |
2403 | q0->rxbuf_map_failed = 0; | ||
2403 | 2404 | ||
2404 | bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, | 2405 | bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, |
2405 | &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); | 2406 | &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); |
@@ -2428,6 +2429,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, | |||
2428 | : rx_cfg->q1_buf_size; | 2429 | : rx_cfg->q1_buf_size; |
2429 | q1->rx_packets = q1->rx_bytes = 0; | 2430 | q1->rx_packets = q1->rx_bytes = 0; |
2430 | q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; | 2431 | q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; |
2432 | q1->rxbuf_map_failed = 0; | ||
2431 | 2433 | ||
2432 | bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, | 2434 | bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, |
2433 | &hqpt_mem[i], &hsqpt_mem[i], | 2435 | &hqpt_mem[i], &hsqpt_mem[i], |
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index e0e797f2ea14..c438d032e8bf 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h | |||
@@ -587,6 +587,7 @@ struct bna_rxq { | |||
587 | u64 rx_bytes; | 587 | u64 rx_bytes; |
588 | u64 rx_packets_with_error; | 588 | u64 rx_packets_with_error; |
589 | u64 rxbuf_alloc_failed; | 589 | u64 rxbuf_alloc_failed; |
590 | u64 rxbuf_map_failed; | ||
590 | }; | 591 | }; |
591 | 592 | ||
592 | /* RxQ pair */ | 593 | /* RxQ pair */ |
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 506047c38607..21a0cfc3e7ec 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
@@ -399,7 +399,13 @@ bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) | |||
399 | } | 399 | } |
400 | 400 | ||
401 | dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, | 401 | dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, |
402 | unmap_q->map_size, DMA_FROM_DEVICE); | 402 | unmap_q->map_size, DMA_FROM_DEVICE); |
403 | if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { | ||
404 | put_page(page); | ||
405 | BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); | ||
406 | rcb->rxq->rxbuf_map_failed++; | ||
407 | goto finishing; | ||
408 | } | ||
403 | 409 | ||
404 | unmap->page = page; | 410 | unmap->page = page; |
405 | unmap->page_offset = page_offset; | 411 | unmap->page_offset = page_offset; |
@@ -454,8 +460,15 @@ bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) | |||
454 | rcb->rxq->rxbuf_alloc_failed++; | 460 | rcb->rxq->rxbuf_alloc_failed++; |
455 | goto finishing; | 461 | goto finishing; |
456 | } | 462 | } |
463 | |||
457 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, | 464 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, |
458 | buff_sz, DMA_FROM_DEVICE); | 465 | buff_sz, DMA_FROM_DEVICE); |
466 | if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { | ||
467 | dev_kfree_skb_any(skb); | ||
468 | BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); | ||
469 | rcb->rxq->rxbuf_map_failed++; | ||
470 | goto finishing; | ||
471 | } | ||
459 | 472 | ||
460 | unmap->skb = skb; | 473 | unmap->skb = skb; |
461 | dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); | 474 | dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); |
@@ -3025,6 +3038,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
3025 | unmap = head_unmap; | 3038 | unmap = head_unmap; |
3026 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, | 3039 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, |
3027 | len, DMA_TO_DEVICE); | 3040 | len, DMA_TO_DEVICE); |
3041 | if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { | ||
3042 | dev_kfree_skb_any(skb); | ||
3043 | BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); | ||
3044 | return NETDEV_TX_OK; | ||
3045 | } | ||
3028 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); | 3046 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); |
3029 | txqent->vector[0].length = htons(len); | 3047 | txqent->vector[0].length = htons(len); |
3030 | dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); | 3048 | dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); |
@@ -3056,6 +3074,15 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
3056 | 3074 | ||
3057 | dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, | 3075 | dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, |
3058 | 0, size, DMA_TO_DEVICE); | 3076 | 0, size, DMA_TO_DEVICE); |
3077 | if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { | ||
3078 | /* Undo the changes starting at tcb->producer_index */ | ||
3079 | bnad_tx_buff_unmap(bnad, unmap_q, q_depth, | ||
3080 | tcb->producer_index); | ||
3081 | dev_kfree_skb_any(skb); | ||
3082 | BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); | ||
3083 | return NETDEV_TX_OK; | ||
3084 | } | ||
3085 | |||
3059 | dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); | 3086 | dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); |
3060 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); | 3087 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); |
3061 | txqent->vector[vect_id].length = htons(size); | 3088 | txqent->vector[vect_id].length = htons(size); |
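
All of the bnad mapping sites above gain the same defensive pattern: the address returned by dma_map_single()/dma_map_page()/skb_frag_dma_map() is checked with dma_mapping_error(), and on failure the buffer is freed, a drop counter is bumped and the operation is abandoned instead of handing a bad address to the hardware. A minimal sketch with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map an skb for receive and fail cleanly if the IOMMU/SWIOTLB could
 * not provide a mapping. */
static int example_map_rx_skb(struct device *dev, struct sk_buff *skb,
			      unsigned int len, dma_addr_t *dma)
{
	*dma = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;		/* caller bumps its map-failed stat */
	}
	return 0;
}
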
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index faedbf24777e..f4ed816b93ee 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h | |||
@@ -175,6 +175,7 @@ struct bnad_drv_stats { | |||
175 | u64 tx_skb_headlen_zero; | 175 | u64 tx_skb_headlen_zero; |
176 | u64 tx_skb_frag_zero; | 176 | u64 tx_skb_frag_zero; |
177 | u64 tx_skb_len_mismatch; | 177 | u64 tx_skb_len_mismatch; |
178 | u64 tx_skb_map_failed; | ||
178 | 179 | ||
179 | u64 hw_stats_updates; | 180 | u64 hw_stats_updates; |
180 | u64 netif_rx_dropped; | 181 | u64 netif_rx_dropped; |
@@ -189,6 +190,7 @@ struct bnad_drv_stats { | |||
189 | u64 rx_unmap_q_alloc_failed; | 190 | u64 rx_unmap_q_alloc_failed; |
190 | 191 | ||
191 | u64 rxbuf_alloc_failed; | 192 | u64 rxbuf_alloc_failed; |
193 | u64 rxbuf_map_failed; | ||
192 | }; | 194 | }; |
193 | 195 | ||
194 | /* Complete driver stats */ | 196 | /* Complete driver stats */ |
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 2bdfc5dff4b1..0e4fdc3dd729 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c | |||
@@ -90,6 +90,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { | |||
90 | "tx_skb_headlen_zero", | 90 | "tx_skb_headlen_zero", |
91 | "tx_skb_frag_zero", | 91 | "tx_skb_frag_zero", |
92 | "tx_skb_len_mismatch", | 92 | "tx_skb_len_mismatch", |
93 | "tx_skb_map_failed", | ||
93 | "hw_stats_updates", | 94 | "hw_stats_updates", |
94 | "netif_rx_dropped", | 95 | "netif_rx_dropped", |
95 | 96 | ||
@@ -102,6 +103,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { | |||
102 | "tx_unmap_q_alloc_failed", | 103 | "tx_unmap_q_alloc_failed", |
103 | "rx_unmap_q_alloc_failed", | 104 | "rx_unmap_q_alloc_failed", |
104 | "rxbuf_alloc_failed", | 105 | "rxbuf_alloc_failed", |
106 | "rxbuf_map_failed", | ||
105 | 107 | ||
106 | "mac_stats_clr_cnt", | 108 | "mac_stats_clr_cnt", |
107 | "mac_frame_64", | 109 | "mac_frame_64", |
@@ -807,6 +809,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) | |||
807 | rx_packets_with_error; | 809 | rx_packets_with_error; |
808 | buf[bi++] = rcb->rxq-> | 810 | buf[bi++] = rcb->rxq-> |
809 | rxbuf_alloc_failed; | 811 | rxbuf_alloc_failed; |
812 | buf[bi++] = rcb->rxq->rxbuf_map_failed; | ||
810 | buf[bi++] = rcb->producer_index; | 813 | buf[bi++] = rcb->producer_index; |
811 | buf[bi++] = rcb->consumer_index; | 814 | buf[bi++] = rcb->consumer_index; |
812 | } | 815 | } |
@@ -821,6 +824,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) | |||
821 | rx_packets_with_error; | 824 | rx_packets_with_error; |
822 | buf[bi++] = rcb->rxq-> | 825 | buf[bi++] = rcb->rxq-> |
823 | rxbuf_alloc_failed; | 826 | rxbuf_alloc_failed; |
827 | buf[bi++] = rcb->rxq->rxbuf_map_failed; | ||
824 | buf[bi++] = rcb->producer_index; | 828 | buf[bi++] = rcb->producer_index; |
825 | buf[bi++] = rcb->consumer_index; | 829 | buf[bi++] = rcb->consumer_index; |
826 | } | 830 | } |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 8353a6cbfcc2..03ed00c49823 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | |||
@@ -157,6 +157,11 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN | |||
157 | CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ | 157 | CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ |
158 | CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ | 158 | CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ |
159 | CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ | 159 | CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ |
160 | CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */ | ||
161 | CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */ | ||
162 | CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */ | ||
163 | CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */ | ||
164 | CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */ | ||
160 | 165 | ||
161 | /* T6 adapters: | 166 | /* T6 adapters: |
162 | */ | 167 | */ |
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 0a27805cbbbd..821540913343 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
@@ -582,6 +582,7 @@ struct be_adapter { | |||
582 | u16 pvid; | 582 | u16 pvid; |
583 | __be16 vxlan_port; | 583 | __be16 vxlan_port; |
584 | int vxlan_port_count; | 584 | int vxlan_port_count; |
585 | int vxlan_port_aliases; | ||
585 | struct phy_info phy; | 586 | struct phy_info phy; |
586 | u8 wol_cap; | 587 | u8 wol_cap; |
587 | bool wol_en; | 588 | bool wol_en; |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 12687bf52b95..7bf51a1a0a77 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -5176,6 +5176,11 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, | |||
5176 | if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) | 5176 | if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) |
5177 | return; | 5177 | return; |
5178 | 5178 | ||
5179 | if (adapter->vxlan_port == port && adapter->vxlan_port_count) { | ||
5180 | adapter->vxlan_port_aliases++; | ||
5181 | return; | ||
5182 | } | ||
5183 | |||
5179 | if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { | 5184 | if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { |
5180 | dev_info(dev, | 5185 | dev_info(dev, |
5181 | "Only one UDP port supported for VxLAN offloads\n"); | 5186 | "Only one UDP port supported for VxLAN offloads\n"); |
@@ -5226,6 +5231,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, | |||
5226 | if (adapter->vxlan_port != port) | 5231 | if (adapter->vxlan_port != port) |
5227 | goto done; | 5232 | goto done; |
5228 | 5233 | ||
5234 | if (adapter->vxlan_port_aliases) { | ||
5235 | adapter->vxlan_port_aliases--; | ||
5236 | return; | ||
5237 | } | ||
5238 | |||
5229 | be_disable_vxlan_offloads(adapter); | 5239 | be_disable_vxlan_offloads(adapter); |
5230 | 5240 | ||
5231 | dev_info(&adapter->pdev->dev, | 5241 | dev_info(&adapter->pdev->dev, |
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4b69d061d90f..710715fcb23d 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -1710,8 +1710,10 @@ static void gfar_configure_serdes(struct net_device *dev) | |||
1710 | * everything for us? Resetting it takes the link down and requires | 1710 | * everything for us? Resetting it takes the link down and requires |
1711 | * several seconds for it to come back. | 1711 | * several seconds for it to come back. |
1712 | */ | 1712 | */ |
1713 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) | 1713 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { |
1714 | put_device(&tbiphy->dev); | ||
1714 | return; | 1715 | return; |
1716 | } | ||
1715 | 1717 | ||
1716 | /* Single clk mode, mii mode off(for serdes communication) */ | 1718 | /* Single clk mode, mii mode off(for serdes communication) */ |
1717 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); | 1719 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); |
@@ -1723,6 +1725,8 @@ static void gfar_configure_serdes(struct net_device *dev) | |||
1723 | phy_write(tbiphy, MII_BMCR, | 1725 | phy_write(tbiphy, MII_BMCR, |
1724 | BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | | 1726 | BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | |
1725 | BMCR_SPEED1000); | 1727 | BMCR_SPEED1000); |
1728 | |||
1729 | put_device(&tbiphy->dev); | ||
1726 | } | 1730 | } |
1727 | 1731 | ||
1728 | static int __gfar_is_rx_idle(struct gfar_private *priv) | 1732 | static int __gfar_is_rx_idle(struct gfar_private *priv) |
@@ -1970,8 +1974,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) | |||
1970 | /* Install our interrupt handlers for Error, | 1974 | /* Install our interrupt handlers for Error, |
1971 | * Transmit, and Receive | 1975 | * Transmit, and Receive |
1972 | */ | 1976 | */ |
1973 | err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, | 1977 | err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, |
1974 | IRQF_NO_SUSPEND, | ||
1975 | gfar_irq(grp, ER)->name, grp); | 1978 | gfar_irq(grp, ER)->name, grp); |
1976 | if (err < 0) { | 1979 | if (err < 0) { |
1977 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", | 1980 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
@@ -1979,6 +1982,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) | |||
1979 | 1982 | ||
1980 | goto err_irq_fail; | 1983 | goto err_irq_fail; |
1981 | } | 1984 | } |
1985 | enable_irq_wake(gfar_irq(grp, ER)->irq); | ||
1986 | |||
1982 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, | 1987 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, |
1983 | gfar_irq(grp, TX)->name, grp); | 1988 | gfar_irq(grp, TX)->name, grp); |
1984 | if (err < 0) { | 1989 | if (err < 0) { |
@@ -1994,14 +1999,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) | |||
1994 | goto rx_irq_fail; | 1999 | goto rx_irq_fail; |
1995 | } | 2000 | } |
1996 | } else { | 2001 | } else { |
1997 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, | 2002 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, |
1998 | IRQF_NO_SUSPEND, | ||
1999 | gfar_irq(grp, TX)->name, grp); | 2003 | gfar_irq(grp, TX)->name, grp); |
2000 | if (err < 0) { | 2004 | if (err < 0) { |
2001 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", | 2005 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
2002 | gfar_irq(grp, TX)->irq); | 2006 | gfar_irq(grp, TX)->irq); |
2003 | goto err_irq_fail; | 2007 | goto err_irq_fail; |
2004 | } | 2008 | } |
2009 | enable_irq_wake(gfar_irq(grp, TX)->irq); | ||
2005 | } | 2010 | } |
2006 | 2011 | ||
2007 | return 0; | 2012 | return 0; |
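
of_phy_find_device() returns the TBI PHY with a reference held on its struct device, so gianfar (and ucc_geth just below, and mvneta later) must drop that reference with put_device() on every exit path, including the early return when the link is already up. In isolation the pattern looks like this, assuming the phy_device still embeds its struct device as ->dev as in this kernel:

#include <linux/mii.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static bool example_tbi_link_up(struct device_node *tbi_np)
{
	struct phy_device *tbiphy = of_phy_find_device(tbi_np);
	bool up;

	if (!tbiphy)
		return false;

	up = phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS;
	put_device(&tbiphy->dev);	/* balance of_phy_find_device() */
	return up;
}
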
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 8e3cd77aa347..664d0c261269 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c | |||
@@ -557,6 +557,7 @@ static const struct of_device_id match_table[] = { | |||
557 | { .compatible = "fsl,etsec-ptp" }, | 557 | { .compatible = "fsl,etsec-ptp" }, |
558 | {}, | 558 | {}, |
559 | }; | 559 | }; |
560 | MODULE_DEVICE_TABLE(of, match_table); | ||
560 | 561 | ||
561 | static struct platform_driver gianfar_ptp_driver = { | 562 | static struct platform_driver gianfar_ptp_driver = { |
562 | .driver = { | 563 | .driver = { |
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 4dd40e057f40..650f7888e32b 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c | |||
@@ -1384,6 +1384,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) | |||
1384 | value = phy_read(tbiphy, ENET_TBI_MII_CR); | 1384 | value = phy_read(tbiphy, ENET_TBI_MII_CR); |
1385 | value &= ~0x1000; /* Turn off autonegotiation */ | 1385 | value &= ~0x1000; /* Turn off autonegotiation */ |
1386 | phy_write(tbiphy, ENET_TBI_MII_CR, value); | 1386 | phy_write(tbiphy, ENET_TBI_MII_CR, value); |
1387 | |||
1388 | put_device(&tbiphy->dev); | ||
1387 | } | 1389 | } |
1388 | 1390 | ||
1389 | init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); | 1391 | init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); |
@@ -1702,8 +1704,10 @@ static void uec_configure_serdes(struct net_device *dev) | |||
1702 | * everything for us? Resetting it takes the link down and requires | 1704 | * everything for us? Resetting it takes the link down and requires |
1703 | * several seconds for it to come back. | 1705 | * several seconds for it to come back. |
1704 | */ | 1706 | */ |
1705 | if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) | 1707 | if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) { |
1708 | put_device(&tbiphy->dev); | ||
1706 | return; | 1709 | return; |
1710 | } | ||
1707 | 1711 | ||
1708 | /* Single clk mode, mii mode off(for serdes communication) */ | 1712 | /* Single clk mode, mii mode off(for serdes communication) */ |
1709 | phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); | 1713 | phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); |
@@ -1711,6 +1715,8 @@ static void uec_configure_serdes(struct net_device *dev) | |||
1711 | phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); | 1715 | phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); |
1712 | 1716 | ||
1713 | phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); | 1717 | phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); |
1718 | |||
1719 | put_device(&tbiphy->dev); | ||
1714 | } | 1720 | } |
1715 | 1721 | ||
1716 | /* Configure the PHY for dev. | 1722 | /* Configure the PHY for dev. |
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index cc2d8b4b18e3..253f8ed0537a 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c | |||
@@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev) | |||
816 | struct net_device *ndev; | 816 | struct net_device *ndev; |
817 | struct hip04_priv *priv; | 817 | struct hip04_priv *priv; |
818 | struct resource *res; | 818 | struct resource *res; |
819 | unsigned int irq; | 819 | int irq; |
820 | int ret; | 820 | int ret; |
821 | 821 | ||
822 | ndev = alloc_etherdev(sizeof(struct hip04_priv)); | 822 | ndev = alloc_etherdev(sizeof(struct hip04_priv)); |
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 28df37420da9..ac02c675c59c 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h | |||
@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr { | |||
460 | u32 index; | 460 | u32 index; |
461 | }; | 461 | }; |
462 | 462 | ||
463 | #define EMAC_ETHTOOL_REGS_VER 0 | 463 | #define EMAC_ETHTOOL_REGS_VER 3 |
464 | #define EMAC4_ETHTOOL_REGS_VER 1 | 464 | #define EMAC4_ETHTOOL_REGS_VER 4 |
465 | #define EMAC4SYNC_ETHTOOL_REGS_VER 2 | 465 | #define EMAC4SYNC_ETHTOOL_REGS_VER 5 |
466 | 466 | ||
467 | #endif /* __IBM_NEWEMAC_CORE_H */ | 467 | #endif /* __IBM_NEWEMAC_CORE_H */ |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 3e0d20037675..62488a67149d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c | |||
@@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, | |||
946 | /* take the lock before we start messing with the ring */ | 946 | /* take the lock before we start messing with the ring */ |
947 | mutex_lock(&hw->aq.arq_mutex); | 947 | mutex_lock(&hw->aq.arq_mutex); |
948 | 948 | ||
949 | if (hw->aq.arq.count == 0) { | ||
950 | i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, | ||
951 | "AQRX: Admin queue not initialized.\n"); | ||
952 | ret_code = I40E_ERR_QUEUE_EMPTY; | ||
953 | goto clean_arq_element_err; | ||
954 | } | ||
955 | |||
949 | /* set next_to_use to head */ | 956 | /* set next_to_use to head */ |
950 | ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); | 957 | ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); |
951 | if (ntu == ntc) { | 958 | if (ntu == ntc) { |
@@ -1007,6 +1014,8 @@ clean_arq_element_out: | |||
1007 | /* Set pending if needed, unlock and return */ | 1014 | /* Set pending if needed, unlock and return */ |
1008 | if (pending != NULL) | 1015 | if (pending != NULL) |
1009 | *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); | 1016 | *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); |
1017 | |||
1018 | clean_arq_element_err: | ||
1010 | mutex_unlock(&hw->aq.arq_mutex); | 1019 | mutex_unlock(&hw->aq.arq_mutex); |
1011 | 1020 | ||
1012 | if (i40e_is_nvm_update_op(&e->desc)) { | 1021 | if (i40e_is_nvm_update_op(&e->desc)) { |
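
Both admin-queue cleaners (the PF copy here and the VF copy below) gain the same guard: after taking arq_mutex they verify the receive queue was actually initialized and, if not, bail out through a label that still releases the mutex. Schematically, with a stand-in error code:

#include <linux/mutex.h>

/* Sketch only: the real code returns I40E_ERR_QUEUE_EMPTY and processes
 * one ARQ event before the unlock label. */
static int example_clean_arq(struct mutex *lock, unsigned int count)
{
	int ret = 0;

	mutex_lock(lock);
	if (count == 0) {
		ret = -ENODATA;		/* stand-in for I40E_ERR_QUEUE_EMPTY */
		goto unlock;
	}

	/* ... dequeue and handle one event ... */

unlock:
	mutex_unlock(lock);
	return ret;
}
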
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 851c1a159be8..2fdf978ae6a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) | |||
2672 | rx_ctx.lrxqthresh = 2; | 2672 | rx_ctx.lrxqthresh = 2; |
2673 | rx_ctx.crcstrip = 1; | 2673 | rx_ctx.crcstrip = 1; |
2674 | rx_ctx.l2tsel = 1; | 2674 | rx_ctx.l2tsel = 1; |
2675 | rx_ctx.showiv = 1; | 2675 | /* this controls whether VLAN is stripped from inner headers */ |
2676 | rx_ctx.showiv = 0; | ||
2676 | #ifdef I40E_FCOE | 2677 | #ifdef I40E_FCOE |
2677 | rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); | 2678 | rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); |
2678 | #endif | 2679 | #endif |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index f08450b90774..929d47152bf2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c | |||
@@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, | |||
887 | /* take the lock before we start messing with the ring */ | 887 | /* take the lock before we start messing with the ring */ |
888 | mutex_lock(&hw->aq.arq_mutex); | 888 | mutex_lock(&hw->aq.arq_mutex); |
889 | 889 | ||
890 | if (hw->aq.arq.count == 0) { | ||
891 | i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, | ||
892 | "AQRX: Admin queue not initialized.\n"); | ||
893 | ret_code = I40E_ERR_QUEUE_EMPTY; | ||
894 | goto clean_arq_element_err; | ||
895 | } | ||
896 | |||
890 | /* set next_to_use to head */ | 897 | /* set next_to_use to head */ |
891 | ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); | 898 | ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); |
892 | if (ntu == ntc) { | 899 | if (ntu == ntc) { |
@@ -948,6 +955,8 @@ clean_arq_element_out: | |||
948 | /* Set pending if needed, unlock and return */ | 955 | /* Set pending if needed, unlock and return */ |
949 | if (pending != NULL) | 956 | if (pending != NULL) |
950 | *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); | 957 | *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); |
958 | |||
959 | clean_arq_element_err: | ||
951 | mutex_unlock(&hw->aq.arq_mutex); | 960 | mutex_unlock(&hw->aq.arq_mutex); |
952 | 961 | ||
953 | return ret_code; | 962 | return ret_code; |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index fe2299ac4f5c..514df76fc70f 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -1479,6 +1479,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1479 | struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); | 1479 | struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); |
1480 | struct sk_buff *skb; | 1480 | struct sk_buff *skb; |
1481 | unsigned char *data; | 1481 | unsigned char *data; |
1482 | dma_addr_t phys_addr; | ||
1482 | u32 rx_status; | 1483 | u32 rx_status; |
1483 | int rx_bytes, err; | 1484 | int rx_bytes, err; |
1484 | 1485 | ||
@@ -1486,6 +1487,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1486 | rx_status = rx_desc->status; | 1487 | rx_status = rx_desc->status; |
1487 | rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); | 1488 | rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); |
1488 | data = (unsigned char *)rx_desc->buf_cookie; | 1489 | data = (unsigned char *)rx_desc->buf_cookie; |
1490 | phys_addr = rx_desc->buf_phys_addr; | ||
1489 | 1491 | ||
1490 | if (!mvneta_rxq_desc_is_first_last(rx_status) || | 1492 | if (!mvneta_rxq_desc_is_first_last(rx_status) || |
1491 | (rx_status & MVNETA_RXD_ERR_SUMMARY)) { | 1493 | (rx_status & MVNETA_RXD_ERR_SUMMARY)) { |
@@ -1534,7 +1536,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1534 | if (!skb) | 1536 | if (!skb) |
1535 | goto err_drop_frame; | 1537 | goto err_drop_frame; |
1536 | 1538 | ||
1537 | dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr, | 1539 | dma_unmap_single(dev->dev.parent, phys_addr, |
1538 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); | 1540 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); |
1539 | 1541 | ||
1540 | rcvd_pkts++; | 1542 | rcvd_pkts++; |
@@ -3173,6 +3175,8 @@ static int mvneta_probe(struct platform_device *pdev) | |||
3173 | struct phy_device *phy = of_phy_find_device(dn); | 3175 | struct phy_device *phy = of_phy_find_device(dn); |
3174 | 3176 | ||
3175 | mvneta_fixed_link_update(pp, phy); | 3177 | mvneta_fixed_link_update(pp, phy); |
3178 | |||
3179 | put_device(&phy->dev); | ||
3176 | } | 3180 | } |
3177 | 3181 | ||
3178 | return 0; | 3182 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 4402a1e48c9b..e7a5000aa12c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -1047,13 +1047,15 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) | |||
1047 | 1047 | ||
1048 | /* If we used up all the quota - we're probably not done yet... */ | 1048 | /* If we used up all the quota - we're probably not done yet... */ |
1049 | if (done == budget) { | 1049 | if (done == budget) { |
1050 | int cpu_curr; | ||
1051 | const struct cpumask *aff; | 1050 | const struct cpumask *aff; |
1051 | struct irq_data *idata; | ||
1052 | int cpu_curr; | ||
1052 | 1053 | ||
1053 | INC_PERF_COUNTER(priv->pstats.napi_quota); | 1054 | INC_PERF_COUNTER(priv->pstats.napi_quota); |
1054 | 1055 | ||
1055 | cpu_curr = smp_processor_id(); | 1056 | cpu_curr = smp_processor_id(); |
1056 | aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; | 1057 | idata = irq_desc_get_irq_data(cq->irq_desc); |
1058 | aff = irq_data_get_affinity_mask(idata); | ||
1057 | 1059 | ||
1058 | if (likely(cpumask_test_cpu(cpu_curr, aff))) | 1060 | if (likely(cpumask_test_cpu(cpu_curr, aff))) |
1059 | return budget; | 1061 | return budget; |
@@ -1268,8 +1270,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) | |||
1268 | rss_context->hash_fn = MLX4_RSS_HASH_TOP; | 1270 | rss_context->hash_fn = MLX4_RSS_HASH_TOP; |
1269 | memcpy(rss_context->rss_key, priv->rss_key, | 1271 | memcpy(rss_context->rss_key, priv->rss_key, |
1270 | MLX4_EN_RSS_KEY_SIZE); | 1272 | MLX4_EN_RSS_KEY_SIZE); |
1271 | netdev_rss_key_fill(rss_context->rss_key, | ||
1272 | MLX4_EN_RSS_KEY_SIZE); | ||
1273 | } else { | 1273 | } else { |
1274 | en_err(priv, "Unknown RSS hash function requested\n"); | 1274 | en_err(priv, "Unknown RSS hash function requested\n"); |
1275 | err = -EINVAL; | 1275 | err = -EINVAL; |
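
[Note] Two separate cleanups in en_rx.c: the busy-polling check now goes through irq_desc_get_irq_data() / irq_data_get_affinity_mask() instead of poking the affinity mask field directly, and a stray netdev_rss_key_fill() that overwrote the key just copied from priv->rss_key is dropped. The affinity test, roughly (sketch, assuming the queue keeps an irq_desc cookie as cq->irq_desc does above):

    struct irq_data *idata = irq_desc_get_irq_data(cq->irq_desc);
    const struct cpumask *aff = irq_data_get_affinity_mask(idata);

    if (cpumask_test_cpu(smp_processor_id(), aff))
            return budget;  /* still on an affine CPU, keep polling */
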
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index bd9ea0d01aae..1d4e2e054647 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c | |||
@@ -1184,10 +1184,11 @@ out: | |||
1184 | if (prot == MLX4_PROT_ETH) { | 1184 | if (prot == MLX4_PROT_ETH) { |
1185 | /* manage the steering entry for promisc mode */ | 1185 | /* manage the steering entry for promisc mode */ |
1186 | if (new_entry) | 1186 | if (new_entry) |
1187 | new_steering_entry(dev, port, steer, index, qp->qpn); | 1187 | err = new_steering_entry(dev, port, steer, |
1188 | index, qp->qpn); | ||
1188 | else | 1189 | else |
1189 | existing_steering_entry(dev, port, steer, | 1190 | err = existing_steering_entry(dev, port, steer, |
1190 | index, qp->qpn); | 1191 | index, qp->qpn); |
1191 | } | 1192 | } |
1192 | if (err && link && index != -1) { | 1193 | if (err && link && index != -1) { |
1193 | if (index < dev->caps.num_mgms) | 1194 | if (index < dev->caps.num_mgms) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index aa0d5ffe92d8..9335e5ae18cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c | |||
@@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) | |||
200 | 200 | ||
201 | return err; | 201 | return err; |
202 | } | 202 | } |
203 | |||
204 | int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey) | ||
205 | { | ||
206 | struct mlx5_cmd_query_special_contexts_mbox_in in; | ||
207 | struct mlx5_cmd_query_special_contexts_mbox_out out; | ||
208 | int err; | ||
209 | |||
210 | memset(&in, 0, sizeof(in)); | ||
211 | memset(&out, 0, sizeof(out)); | ||
212 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); | ||
213 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
214 | if (err) | ||
215 | return err; | ||
216 | |||
217 | if (out.hdr.status) | ||
218 | err = mlx5_cmd_status_to_err(&out.hdr); | ||
219 | |||
220 | *rsvd_lkey = be32_to_cpu(out.resd_lkey); | ||
221 | |||
222 | return err; | ||
223 | } | ||
224 | EXPORT_SYMBOL(mlx5_core_query_special_context); | ||
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 66d4ab703f45..60f43ec22175 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c | |||
@@ -1601,6 +1601,7 @@ static const struct of_device_id ks8851_match_table[] = { | |||
1601 | { .compatible = "micrel,ks8851" }, | 1601 | { .compatible = "micrel,ks8851" }, |
1602 | { } | 1602 | { } |
1603 | }; | 1603 | }; |
1604 | MODULE_DEVICE_TABLE(of, ks8851_match_table); | ||
1604 | 1605 | ||
1605 | static struct spi_driver ks8851_driver = { | 1606 | static struct spi_driver ks8851_driver = { |
1606 | .driver = { | 1607 | .driver = { |
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index becbb5f1f5a7..a10c928bbd6b 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c | |||
@@ -552,6 +552,7 @@ static const struct of_device_id moxart_mac_match[] = { | |||
552 | { .compatible = "moxa,moxart-mac" }, | 552 | { .compatible = "moxa,moxart-mac" }, |
553 | { } | 553 | { } |
554 | }; | 554 | }; |
555 | MODULE_DEVICE_TABLE(of, moxart_mac_match); | ||
555 | 556 | ||
556 | static struct platform_driver moxart_mac_driver = { | 557 | static struct platform_driver moxart_mac_driver = { |
557 | .probe = moxart_mac_probe, | 558 | .probe = moxart_mac_probe, |
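
[Note] The ks8851 and moxart hunks add the same one-liner: exporting the OF match table as a module alias so that modprobe/udev can autoload the driver when a matching device-tree node is present. The usual shape of the pattern (names here are hypothetical):

    #include <linux/module.h>
    #include <linux/of.h>

    static const struct of_device_id example_match[] = {
            { .compatible = "vendor,example-mac" },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, example_match);  /* emits the of: modalias entries */
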
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 06bcc734fe8d..d6696cfa11d2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -536,6 +536,7 @@ struct qlcnic_hardware_context { | |||
536 | u8 extend_lb_time; | 536 | u8 extend_lb_time; |
537 | u8 phys_port_id[ETH_ALEN]; | 537 | u8 phys_port_id[ETH_ALEN]; |
538 | u8 lb_mode; | 538 | u8 lb_mode; |
539 | u8 vxlan_port_count; | ||
539 | u16 vxlan_port; | 540 | u16 vxlan_port; |
540 | struct device *hwmon_dev; | 541 | struct device *hwmon_dev; |
541 | u32 post_mode; | 542 | u32 post_mode; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 8b08b20e8b30..d4481454b5f8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -483,11 +483,17 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev, | |||
483 | /* Adapter supports only one VXLAN port. Use very first port | 483 | /* Adapter supports only one VXLAN port. Use very first port |
484 | * for enabling offload | 484 | * for enabling offload |
485 | */ | 485 | */ |
486 | if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port) | 486 | if (!qlcnic_encap_rx_offload(adapter)) |
487 | return; | 487 | return; |
488 | if (!ahw->vxlan_port_count) { | ||
489 | ahw->vxlan_port_count = 1; | ||
490 | ahw->vxlan_port = ntohs(port); | ||
491 | adapter->flags |= QLCNIC_ADD_VXLAN_PORT; | ||
492 | return; | ||
493 | } | ||
494 | if (ahw->vxlan_port == ntohs(port)) | ||
495 | ahw->vxlan_port_count++; | ||
488 | 496 | ||
489 | ahw->vxlan_port = ntohs(port); | ||
490 | adapter->flags |= QLCNIC_ADD_VXLAN_PORT; | ||
491 | } | 497 | } |
492 | 498 | ||
493 | static void qlcnic_del_vxlan_port(struct net_device *netdev, | 499 | static void qlcnic_del_vxlan_port(struct net_device *netdev, |
@@ -496,11 +502,13 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev, | |||
496 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 502 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
497 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 503 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
498 | 504 | ||
499 | if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port || | 505 | if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count || |
500 | (ahw->vxlan_port != ntohs(port))) | 506 | (ahw->vxlan_port != ntohs(port))) |
501 | return; | 507 | return; |
502 | 508 | ||
503 | adapter->flags |= QLCNIC_DEL_VXLAN_PORT; | 509 | ahw->vxlan_port_count--; |
510 | if (!ahw->vxlan_port_count) | ||
511 | adapter->flags |= QLCNIC_DEL_VXLAN_PORT; | ||
504 | } | 512 | } |
505 | 513 | ||
506 | static netdev_features_t qlcnic_features_check(struct sk_buff *skb, | 514 | static netdev_features_t qlcnic_features_check(struct sk_buff *skb, |
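
[Note] The qlcnic change replaces the single "a VXLAN port is programmed" test with a reference count, so that adding the same UDP port from several VXLAN devices and later deleting one of them no longer tears down offload for the others. Stripped of driver plumbing, the add/del logic amounts to (sketch, using the fields shown above):

    /* add */
    if (!ahw->vxlan_port_count) {            /* first user programs the NIC */
            ahw->vxlan_port_count = 1;
            ahw->vxlan_port = ntohs(port);
            adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
    } else if (ahw->vxlan_port == ntohs(port)) {
            ahw->vxlan_port_count++;         /* same port, just count it */
    }

    /* delete */
    if (ahw->vxlan_port == ntohs(port) && ahw->vxlan_port_count &&
        --ahw->vxlan_port_count == 0)
            adapter->flags |= QLCNIC_DEL_VXLAN_PORT;  /* last user gone */
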
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index d79e33b3c191..686334f4588d 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
@@ -157,6 +157,7 @@ enum { | |||
157 | NWayAdvert = 0x66, /* MII ADVERTISE */ | 157 | NWayAdvert = 0x66, /* MII ADVERTISE */ |
158 | NWayLPAR = 0x68, /* MII LPA */ | 158 | NWayLPAR = 0x68, /* MII LPA */ |
159 | NWayExpansion = 0x6A, /* MII Expansion */ | 159 | NWayExpansion = 0x6A, /* MII Expansion */ |
160 | TxDmaOkLowDesc = 0x82, /* Low 16 bit address of a Tx descriptor. */ | ||
160 | Config5 = 0xD8, /* Config5 */ | 161 | Config5 = 0xD8, /* Config5 */ |
161 | TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ | 162 | TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ |
162 | RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ | 163 | RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ |
@@ -341,6 +342,7 @@ struct cp_private { | |||
341 | unsigned tx_tail; | 342 | unsigned tx_tail; |
342 | struct cp_desc *tx_ring; | 343 | struct cp_desc *tx_ring; |
343 | struct sk_buff *tx_skb[CP_TX_RING_SIZE]; | 344 | struct sk_buff *tx_skb[CP_TX_RING_SIZE]; |
345 | u32 tx_opts[CP_TX_RING_SIZE]; | ||
344 | 346 | ||
345 | unsigned rx_buf_sz; | 347 | unsigned rx_buf_sz; |
346 | unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */ | 348 | unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */ |
@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp) | |||
665 | BUG_ON(!skb); | 667 | BUG_ON(!skb); |
666 | 668 | ||
667 | dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), | 669 | dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), |
668 | le32_to_cpu(txd->opts1) & 0xffff, | 670 | cp->tx_opts[tx_tail] & 0xffff, |
669 | PCI_DMA_TODEVICE); | 671 | PCI_DMA_TODEVICE); |
670 | 672 | ||
671 | if (status & LastFrag) { | 673 | if (status & LastFrag) { |
@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
733 | { | 735 | { |
734 | struct cp_private *cp = netdev_priv(dev); | 736 | struct cp_private *cp = netdev_priv(dev); |
735 | unsigned entry; | 737 | unsigned entry; |
736 | u32 eor, flags; | 738 | u32 eor, opts1; |
737 | unsigned long intr_flags; | 739 | unsigned long intr_flags; |
738 | __le32 opts2; | 740 | __le32 opts2; |
739 | int mss = 0; | 741 | int mss = 0; |
@@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
753 | mss = skb_shinfo(skb)->gso_size; | 755 | mss = skb_shinfo(skb)->gso_size; |
754 | 756 | ||
755 | opts2 = cpu_to_le32(cp_tx_vlan_tag(skb)); | 757 | opts2 = cpu_to_le32(cp_tx_vlan_tag(skb)); |
758 | opts1 = DescOwn; | ||
759 | if (mss) | ||
760 | opts1 |= LargeSend | ((mss & MSSMask) << MSSShift); | ||
761 | else if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
762 | const struct iphdr *ip = ip_hdr(skb); | ||
763 | if (ip->protocol == IPPROTO_TCP) | ||
764 | opts1 |= IPCS | TCPCS; | ||
765 | else if (ip->protocol == IPPROTO_UDP) | ||
766 | opts1 |= IPCS | UDPCS; | ||
767 | else { | ||
768 | WARN_ONCE(1, | ||
769 | "Net bug: asked to checksum invalid Legacy IP packet\n"); | ||
770 | goto out_dma_error; | ||
771 | } | ||
772 | } | ||
756 | 773 | ||
757 | if (skb_shinfo(skb)->nr_frags == 0) { | 774 | if (skb_shinfo(skb)->nr_frags == 0) { |
758 | struct cp_desc *txd = &cp->tx_ring[entry]; | 775 | struct cp_desc *txd = &cp->tx_ring[entry]; |
@@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
768 | txd->addr = cpu_to_le64(mapping); | 785 | txd->addr = cpu_to_le64(mapping); |
769 | wmb(); | 786 | wmb(); |
770 | 787 | ||
771 | flags = eor | len | DescOwn | FirstFrag | LastFrag; | 788 | opts1 |= eor | len | FirstFrag | LastFrag; |
772 | |||
773 | if (mss) | ||
774 | flags |= LargeSend | ((mss & MSSMask) << MSSShift); | ||
775 | else if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
776 | const struct iphdr *ip = ip_hdr(skb); | ||
777 | if (ip->protocol == IPPROTO_TCP) | ||
778 | flags |= IPCS | TCPCS; | ||
779 | else if (ip->protocol == IPPROTO_UDP) | ||
780 | flags |= IPCS | UDPCS; | ||
781 | else | ||
782 | WARN_ON(1); /* we need a WARN() */ | ||
783 | } | ||
784 | 789 | ||
785 | txd->opts1 = cpu_to_le32(flags); | 790 | txd->opts1 = cpu_to_le32(opts1); |
786 | wmb(); | 791 | wmb(); |
787 | 792 | ||
788 | cp->tx_skb[entry] = skb; | 793 | cp->tx_skb[entry] = skb; |
789 | entry = NEXT_TX(entry); | 794 | cp->tx_opts[entry] = opts1; |
795 | netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", | ||
796 | entry, skb->len); | ||
790 | } else { | 797 | } else { |
791 | struct cp_desc *txd; | 798 | struct cp_desc *txd; |
792 | u32 first_len, first_eor; | 799 | u32 first_len, first_eor, ctrl; |
793 | dma_addr_t first_mapping; | 800 | dma_addr_t first_mapping; |
794 | int frag, first_entry = entry; | 801 | int frag, first_entry = entry; |
795 | const struct iphdr *ip = ip_hdr(skb); | ||
796 | 802 | ||
797 | /* We must give this initial chunk to the device last. | 803 | /* We must give this initial chunk to the device last. |
798 | * Otherwise we could race with the device. | 804 | * Otherwise we could race with the device. |
@@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
805 | goto out_dma_error; | 811 | goto out_dma_error; |
806 | 812 | ||
807 | cp->tx_skb[entry] = skb; | 813 | cp->tx_skb[entry] = skb; |
808 | entry = NEXT_TX(entry); | ||
809 | 814 | ||
810 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | 815 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { |
811 | const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | 816 | const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; |
812 | u32 len; | 817 | u32 len; |
813 | u32 ctrl; | ||
814 | dma_addr_t mapping; | 818 | dma_addr_t mapping; |
815 | 819 | ||
820 | entry = NEXT_TX(entry); | ||
821 | |||
816 | len = skb_frag_size(this_frag); | 822 | len = skb_frag_size(this_frag); |
817 | mapping = dma_map_single(&cp->pdev->dev, | 823 | mapping = dma_map_single(&cp->pdev->dev, |
818 | skb_frag_address(this_frag), | 824 | skb_frag_address(this_frag), |
@@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
824 | 830 | ||
825 | eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; | 831 | eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; |
826 | 832 | ||
827 | ctrl = eor | len | DescOwn; | 833 | ctrl = opts1 | eor | len; |
828 | |||
829 | if (mss) | ||
830 | ctrl |= LargeSend | | ||
831 | ((mss & MSSMask) << MSSShift); | ||
832 | else if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
833 | if (ip->protocol == IPPROTO_TCP) | ||
834 | ctrl |= IPCS | TCPCS; | ||
835 | else if (ip->protocol == IPPROTO_UDP) | ||
836 | ctrl |= IPCS | UDPCS; | ||
837 | else | ||
838 | BUG(); | ||
839 | } | ||
840 | 834 | ||
841 | if (frag == skb_shinfo(skb)->nr_frags - 1) | 835 | if (frag == skb_shinfo(skb)->nr_frags - 1) |
842 | ctrl |= LastFrag; | 836 | ctrl |= LastFrag; |
@@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
849 | txd->opts1 = cpu_to_le32(ctrl); | 843 | txd->opts1 = cpu_to_le32(ctrl); |
850 | wmb(); | 844 | wmb(); |
851 | 845 | ||
846 | cp->tx_opts[entry] = ctrl; | ||
852 | cp->tx_skb[entry] = skb; | 847 | cp->tx_skb[entry] = skb; |
853 | entry = NEXT_TX(entry); | ||
854 | } | 848 | } |
855 | 849 | ||
856 | txd = &cp->tx_ring[first_entry]; | 850 | txd = &cp->tx_ring[first_entry]; |
@@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
858 | txd->addr = cpu_to_le64(first_mapping); | 852 | txd->addr = cpu_to_le64(first_mapping); |
859 | wmb(); | 853 | wmb(); |
860 | 854 | ||
861 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 855 | ctrl = opts1 | first_eor | first_len | FirstFrag; |
862 | if (ip->protocol == IPPROTO_TCP) | 856 | txd->opts1 = cpu_to_le32(ctrl); |
863 | txd->opts1 = cpu_to_le32(first_eor | first_len | | ||
864 | FirstFrag | DescOwn | | ||
865 | IPCS | TCPCS); | ||
866 | else if (ip->protocol == IPPROTO_UDP) | ||
867 | txd->opts1 = cpu_to_le32(first_eor | first_len | | ||
868 | FirstFrag | DescOwn | | ||
869 | IPCS | UDPCS); | ||
870 | else | ||
871 | BUG(); | ||
872 | } else | ||
873 | txd->opts1 = cpu_to_le32(first_eor | first_len | | ||
874 | FirstFrag | DescOwn); | ||
875 | wmb(); | 857 | wmb(); |
858 | |||
859 | cp->tx_opts[first_entry] = ctrl; | ||
860 | netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n", | ||
861 | first_entry, entry, skb->len); | ||
876 | } | 862 | } |
877 | cp->tx_head = entry; | 863 | cp->tx_head = NEXT_TX(entry); |
878 | 864 | ||
879 | netdev_sent_queue(dev, skb->len); | 865 | netdev_sent_queue(dev, skb->len); |
880 | netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", | ||
881 | entry, skb->len); | ||
882 | if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) | 866 | if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) |
883 | netif_stop_queue(dev); | 867 | netif_stop_queue(dev); |
884 | 868 | ||
@@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_private *cp) | |||
1115 | { | 1099 | { |
1116 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); | 1100 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); |
1117 | cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); | 1101 | cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); |
1102 | memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); | ||
1118 | 1103 | ||
1119 | cp_init_rings_index(cp); | 1104 | cp_init_rings_index(cp); |
1120 | 1105 | ||
@@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp) | |||
1151 | desc = cp->rx_ring + i; | 1136 | desc = cp->rx_ring + i; |
1152 | dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), | 1137 | dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), |
1153 | cp->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1138 | cp->rx_buf_sz, PCI_DMA_FROMDEVICE); |
1154 | dev_kfree_skb(cp->rx_skb[i]); | 1139 | dev_kfree_skb_any(cp->rx_skb[i]); |
1155 | } | 1140 | } |
1156 | } | 1141 | } |
1157 | 1142 | ||
@@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_private *cp) | |||
1164 | le32_to_cpu(desc->opts1) & 0xffff, | 1149 | le32_to_cpu(desc->opts1) & 0xffff, |
1165 | PCI_DMA_TODEVICE); | 1150 | PCI_DMA_TODEVICE); |
1166 | if (le32_to_cpu(desc->opts1) & LastFrag) | 1151 | if (le32_to_cpu(desc->opts1) & LastFrag) |
1167 | dev_kfree_skb(skb); | 1152 | dev_kfree_skb_any(skb); |
1168 | cp->dev->stats.tx_dropped++; | 1153 | cp->dev->stats.tx_dropped++; |
1169 | } | 1154 | } |
1170 | } | 1155 | } |
@@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_private *cp) | |||
1172 | 1157 | ||
1173 | memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); | 1158 | memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); |
1174 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); | 1159 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); |
1160 | memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); | ||
1175 | 1161 | ||
1176 | memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); | 1162 | memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); |
1177 | memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); | 1163 | memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); |
@@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_device *dev) | |||
1249 | { | 1235 | { |
1250 | struct cp_private *cp = netdev_priv(dev); | 1236 | struct cp_private *cp = netdev_priv(dev); |
1251 | unsigned long flags; | 1237 | unsigned long flags; |
1252 | int rc; | 1238 | int rc, i; |
1253 | 1239 | ||
1254 | netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n", | 1240 | netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n", |
1255 | cpr8(Cmd), cpr16(CpCmd), | 1241 | cpr8(Cmd), cpr16(CpCmd), |
@@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_device *dev) | |||
1257 | 1243 | ||
1258 | spin_lock_irqsave(&cp->lock, flags); | 1244 | spin_lock_irqsave(&cp->lock, flags); |
1259 | 1245 | ||
1246 | netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n", | ||
1247 | cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc)); | ||
1248 | for (i = 0; i < CP_TX_RING_SIZE; i++) { | ||
1249 | netif_dbg(cp, tx_err, cp->dev, | ||
1250 | "TX slot %d @%p: %08x (%08x) %08x %llx %p\n", | ||
1251 | i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1), | ||
1252 | cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2), | ||
1253 | le64_to_cpu(cp->tx_ring[i].addr), | ||
1254 | cp->tx_skb[i]); | ||
1255 | } | ||
1256 | |||
1260 | cp_stop_hw(cp); | 1257 | cp_stop_hw(cp); |
1261 | cp_clean_rings(cp); | 1258 | cp_clean_rings(cp); |
1262 | rc = cp_init_rings(cp); | 1259 | rc = cp_init_rings(cp); |
1263 | cp_start_hw(cp); | 1260 | cp_start_hw(cp); |
1264 | cp_enable_irq(cp); | 1261 | __cp_set_rx_mode(dev); |
1262 | cpw16_f(IntrMask, cp_norx_intr_mask); | ||
1265 | 1263 | ||
1266 | netif_wake_queue(dev); | 1264 | netif_wake_queue(dev); |
1265 | napi_schedule_irqoff(&cp->napi); | ||
1267 | 1266 | ||
1268 | spin_unlock_irqrestore(&cp->lock, flags); | 1267 | spin_unlock_irqrestore(&cp->lock, flags); |
1269 | } | 1268 | } |
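
[Note] The 8139cp rework revolves around one idea: compute the opts1 checksum/TSO bits once, before anything is DMA-mapped, and remember what was written for each slot in cp->tx_opts[]. That removes the duplicated WARN/BUG branches in the fragment loops, lets an unsupported protocol bail out before a descriptor is handed to the chip, and means cp_tx() and the timeout path no longer read lengths back from descriptors the NIC may still own. In outline (sketch):

    u32 opts1 = DescOwn;

    if (mss)
            opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
    else if (skb->ip_summed == CHECKSUM_PARTIAL) {
            const struct iphdr *ip = ip_hdr(skb);

            if (ip->protocol == IPPROTO_TCP)
                    opts1 |= IPCS | TCPCS;
            else if (ip->protocol == IPPROTO_UDP)
                    opts1 |= IPCS | UDPCS;
            else
                    goto out_dma_error;   /* nothing mapped yet, safe to drop */
    }

    /* later, per descriptor: */
    cp->tx_opts[entry] = opts1 | eor | len /* | FirstFrag | LastFrag */;

The tx_timeout handler additionally dumps the ring state (using the new TxDmaOkLowDesc register and the tx_opts[] shadow) and restarts through the normal rx-mode and interrupt-mask setup plus a NAPI kick, rather than only re-enabling interrupts.
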
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2b32e0c5a0b4..b4f21232019a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) | |||
6081 | { | 6081 | { |
6082 | void __iomem *ioaddr = tp->mmio_addr; | 6082 | void __iomem *ioaddr = tp->mmio_addr; |
6083 | struct pci_dev *pdev = tp->pci_dev; | 6083 | struct pci_dev *pdev = tp->pci_dev; |
6084 | u16 rg_saw_cnt; | 6084 | int rg_saw_cnt; |
6085 | u32 data; | 6085 | u32 data; |
6086 | static const struct ephy_info e_info_8168h_1[] = { | 6086 | static const struct ephy_info e_info_8168h_1[] = { |
6087 | { 0x1e, 0x0800, 0x0001 }, | 6087 | { 0x1e, 0x0800, 0x0001 }, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index b735fa22ac95..ebf6abc4853f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | |||
@@ -161,11 +161,16 @@ int stmmac_mdio_reset(struct mii_bus *bus) | |||
161 | 161 | ||
162 | if (!gpio_request(reset_gpio, "mdio-reset")) { | 162 | if (!gpio_request(reset_gpio, "mdio-reset")) { |
163 | gpio_direction_output(reset_gpio, active_low ? 1 : 0); | 163 | gpio_direction_output(reset_gpio, active_low ? 1 : 0); |
164 | udelay(data->delays[0]); | 164 | if (data->delays[0]) |
165 | msleep(DIV_ROUND_UP(data->delays[0], 1000)); | ||
166 | |||
165 | gpio_set_value(reset_gpio, active_low ? 0 : 1); | 167 | gpio_set_value(reset_gpio, active_low ? 0 : 1); |
166 | udelay(data->delays[1]); | 168 | if (data->delays[1]) |
169 | msleep(DIV_ROUND_UP(data->delays[1], 1000)); | ||
170 | |||
167 | gpio_set_value(reset_gpio, active_low ? 1 : 0); | 171 | gpio_set_value(reset_gpio, active_low ? 1 : 0); |
168 | udelay(data->delays[2]); | 172 | if (data->delays[2]) |
173 | msleep(DIV_ROUND_UP(data->delays[2], 1000)); | ||
169 | } | 174 | } |
170 | } | 175 | } |
171 | #endif | 176 | #endif |
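
[Note] The stmmac MDIO reset delays come from the device tree in microseconds and can easily reach the millisecond range; udelay() busy-waits and is not meant for delays that long, and this GPIO reset path is allowed to sleep, so the patch rounds up and sleeps instead. The same conversion as a helper (sketch, delay_us standing in for the DT value):

    #include <linux/delay.h>
    #include <linux/kernel.h>

    static void mdio_reset_delay(u32 delay_us)
    {
            if (delay_us)
                    msleep(DIV_ROUND_UP(delay_us, 1000));  /* >= 1 ms, may sleep */
    }
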
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 53fe200e0b79..cc106d892e29 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
@@ -1756,7 +1756,8 @@ static const struct net_device_ops vnet_ops = { | |||
1756 | #endif | 1756 | #endif |
1757 | }; | 1757 | }; |
1758 | 1758 | ||
1759 | static struct vnet *vnet_new(const u64 *local_mac) | 1759 | static struct vnet *vnet_new(const u64 *local_mac, |
1760 | struct vio_dev *vdev) | ||
1760 | { | 1761 | { |
1761 | struct net_device *dev; | 1762 | struct net_device *dev; |
1762 | struct vnet *vp; | 1763 | struct vnet *vp; |
@@ -1790,6 +1791,8 @@ static struct vnet *vnet_new(const u64 *local_mac) | |||
1790 | NETIF_F_HW_CSUM | NETIF_F_SG; | 1791 | NETIF_F_HW_CSUM | NETIF_F_SG; |
1791 | dev->features = dev->hw_features; | 1792 | dev->features = dev->hw_features; |
1792 | 1793 | ||
1794 | SET_NETDEV_DEV(dev, &vdev->dev); | ||
1795 | |||
1793 | err = register_netdev(dev); | 1796 | err = register_netdev(dev); |
1794 | if (err) { | 1797 | if (err) { |
1795 | pr_err("Cannot register net device, aborting\n"); | 1798 | pr_err("Cannot register net device, aborting\n"); |
@@ -1808,7 +1811,8 @@ err_out_free_dev: | |||
1808 | return ERR_PTR(err); | 1811 | return ERR_PTR(err); |
1809 | } | 1812 | } |
1810 | 1813 | ||
1811 | static struct vnet *vnet_find_or_create(const u64 *local_mac) | 1814 | static struct vnet *vnet_find_or_create(const u64 *local_mac, |
1815 | struct vio_dev *vdev) | ||
1812 | { | 1816 | { |
1813 | struct vnet *iter, *vp; | 1817 | struct vnet *iter, *vp; |
1814 | 1818 | ||
@@ -1821,7 +1825,7 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac) | |||
1821 | } | 1825 | } |
1822 | } | 1826 | } |
1823 | if (!vp) | 1827 | if (!vp) |
1824 | vp = vnet_new(local_mac); | 1828 | vp = vnet_new(local_mac, vdev); |
1825 | mutex_unlock(&vnet_list_mutex); | 1829 | mutex_unlock(&vnet_list_mutex); |
1826 | 1830 | ||
1827 | return vp; | 1831 | return vp; |
@@ -1848,7 +1852,8 @@ static void vnet_cleanup(void) | |||
1848 | static const char *local_mac_prop = "local-mac-address"; | 1852 | static const char *local_mac_prop = "local-mac-address"; |
1849 | 1853 | ||
1850 | static struct vnet *vnet_find_parent(struct mdesc_handle *hp, | 1854 | static struct vnet *vnet_find_parent(struct mdesc_handle *hp, |
1851 | u64 port_node) | 1855 | u64 port_node, |
1856 | struct vio_dev *vdev) | ||
1852 | { | 1857 | { |
1853 | const u64 *local_mac = NULL; | 1858 | const u64 *local_mac = NULL; |
1854 | u64 a; | 1859 | u64 a; |
@@ -1869,7 +1874,7 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp, | |||
1869 | if (!local_mac) | 1874 | if (!local_mac) |
1870 | return ERR_PTR(-ENODEV); | 1875 | return ERR_PTR(-ENODEV); |
1871 | 1876 | ||
1872 | return vnet_find_or_create(local_mac); | 1877 | return vnet_find_or_create(local_mac, vdev); |
1873 | } | 1878 | } |
1874 | 1879 | ||
1875 | static struct ldc_channel_config vnet_ldc_cfg = { | 1880 | static struct ldc_channel_config vnet_ldc_cfg = { |
@@ -1923,7 +1928,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1923 | 1928 | ||
1924 | hp = mdesc_grab(); | 1929 | hp = mdesc_grab(); |
1925 | 1930 | ||
1926 | vp = vnet_find_parent(hp, vdev->mp); | 1931 | vp = vnet_find_parent(hp, vdev->mp, vdev); |
1927 | if (IS_ERR(vp)) { | 1932 | if (IS_ERR(vp)) { |
1928 | pr_err("Cannot find port parent vnet\n"); | 1933 | pr_err("Cannot find port parent vnet\n"); |
1929 | err = PTR_ERR(vp); | 1934 | err = PTR_ERR(vp); |
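
[Note] Threading the vio_dev down into vnet_new() exists only so the new net_device can be parented before registration: SET_NETDEV_DEV() fills in dev->dev.parent, which gives the interface its /sys/class/net/<name>/device link and lets udev apply bus-based persistent naming. The pairing is simply (sketch):

    SET_NETDEV_DEV(dev, &vdev->dev);   /* parent to the vio device */
    err = register_netdev(dev);        /* must come after, as above */
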
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1a5aca55ea9f..9f9832f0dea9 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c | |||
@@ -291,13 +291,6 @@ static int netcp_module_probe(struct netcp_device *netcp_device, | |||
291 | interface_list) { | 291 | interface_list) { |
292 | struct netcp_intf_modpriv *intf_modpriv; | 292 | struct netcp_intf_modpriv *intf_modpriv; |
293 | 293 | ||
294 | /* If interface not registered then register now */ | ||
295 | if (!netcp_intf->netdev_registered) | ||
296 | ret = netcp_register_interface(netcp_intf); | ||
297 | |||
298 | if (ret) | ||
299 | return -ENODEV; | ||
300 | |||
301 | intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), | 294 | intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), |
302 | GFP_KERNEL); | 295 | GFP_KERNEL); |
303 | if (!intf_modpriv) | 296 | if (!intf_modpriv) |
@@ -306,6 +299,11 @@ static int netcp_module_probe(struct netcp_device *netcp_device, | |||
306 | interface = of_parse_phandle(netcp_intf->node_interface, | 299 | interface = of_parse_phandle(netcp_intf->node_interface, |
307 | module->name, 0); | 300 | module->name, 0); |
308 | 301 | ||
302 | if (!interface) { | ||
303 | devm_kfree(dev, intf_modpriv); | ||
304 | continue; | ||
305 | } | ||
306 | |||
309 | intf_modpriv->netcp_priv = netcp_intf; | 307 | intf_modpriv->netcp_priv = netcp_intf; |
310 | intf_modpriv->netcp_module = module; | 308 | intf_modpriv->netcp_module = module; |
311 | list_add_tail(&intf_modpriv->intf_list, | 309 | list_add_tail(&intf_modpriv->intf_list, |
@@ -323,6 +321,18 @@ static int netcp_module_probe(struct netcp_device *netcp_device, | |||
323 | continue; | 321 | continue; |
324 | } | 322 | } |
325 | } | 323 | } |
324 | |||
325 | /* Now register the interface with netdev */ | ||
326 | list_for_each_entry(netcp_intf, | ||
327 | &netcp_device->interface_head, | ||
328 | interface_list) { | ||
329 | /* If interface not registered then register now */ | ||
330 | if (!netcp_intf->netdev_registered) { | ||
331 | ret = netcp_register_interface(netcp_intf); | ||
332 | if (ret) | ||
333 | return -ENODEV; | ||
334 | } | ||
335 | } | ||
326 | return 0; | 336 | return 0; |
327 | } | 337 | } |
328 | 338 | ||
@@ -357,7 +367,6 @@ int netcp_register_module(struct netcp_module *module) | |||
357 | if (ret < 0) | 367 | if (ret < 0) |
358 | goto fail; | 368 | goto fail; |
359 | } | 369 | } |
360 | |||
361 | mutex_unlock(&netcp_modules_lock); | 370 | mutex_unlock(&netcp_modules_lock); |
362 | return 0; | 371 | return 0; |
363 | 372 | ||
@@ -796,7 +805,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp) | |||
796 | netcp->rx_pool = NULL; | 805 | netcp->rx_pool = NULL; |
797 | } | 806 | } |
798 | 807 | ||
799 | static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) | 808 | static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) |
800 | { | 809 | { |
801 | struct knav_dma_desc *hwdesc; | 810 | struct knav_dma_desc *hwdesc; |
802 | unsigned int buf_len, dma_sz; | 811 | unsigned int buf_len, dma_sz; |
@@ -810,7 +819,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) | |||
810 | hwdesc = knav_pool_desc_get(netcp->rx_pool); | 819 | hwdesc = knav_pool_desc_get(netcp->rx_pool); |
811 | if (IS_ERR_OR_NULL(hwdesc)) { | 820 | if (IS_ERR_OR_NULL(hwdesc)) { |
812 | dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); | 821 | dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); |
813 | return; | 822 | return -ENOMEM; |
814 | } | 823 | } |
815 | 824 | ||
816 | if (likely(fdq == 0)) { | 825 | if (likely(fdq == 0)) { |
@@ -862,25 +871,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) | |||
862 | knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, | 871 | knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, |
863 | &dma_sz); | 872 | &dma_sz); |
864 | knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); | 873 | knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); |
865 | return; | 874 | return 0; |
866 | 875 | ||
867 | fail: | 876 | fail: |
868 | knav_pool_desc_put(netcp->rx_pool, hwdesc); | 877 | knav_pool_desc_put(netcp->rx_pool, hwdesc); |
878 | return -ENOMEM; | ||
869 | } | 879 | } |
870 | 880 | ||
871 | /* Refill Rx FDQ with descriptors & attached buffers */ | 881 | /* Refill Rx FDQ with descriptors & attached buffers */ |
872 | static void netcp_rxpool_refill(struct netcp_intf *netcp) | 882 | static void netcp_rxpool_refill(struct netcp_intf *netcp) |
873 | { | 883 | { |
874 | u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; | 884 | u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; |
875 | int i; | 885 | int i, ret = 0; |
876 | 886 | ||
877 | /* Calculate the FDQ deficit and refill */ | 887 | /* Calculate the FDQ deficit and refill */ |
878 | for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { | 888 | for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { |
879 | fdq_deficit[i] = netcp->rx_queue_depths[i] - | 889 | fdq_deficit[i] = netcp->rx_queue_depths[i] - |
880 | knav_queue_get_count(netcp->rx_fdq[i]); | 890 | knav_queue_get_count(netcp->rx_fdq[i]); |
881 | 891 | ||
882 | while (fdq_deficit[i]--) | 892 | while (fdq_deficit[i]-- && !ret) |
883 | netcp_allocate_rx_buf(netcp, i); | 893 | ret = netcp_allocate_rx_buf(netcp, i); |
884 | } /* end for fdqs */ | 894 | } /* end for fdqs */ |
885 | } | 895 | } |
886 | 896 | ||
@@ -893,12 +903,12 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget) | |||
893 | 903 | ||
894 | packets = netcp_process_rx_packets(netcp, budget); | 904 | packets = netcp_process_rx_packets(netcp, budget); |
895 | 905 | ||
906 | netcp_rxpool_refill(netcp); | ||
896 | if (packets < budget) { | 907 | if (packets < budget) { |
897 | napi_complete(&netcp->rx_napi); | 908 | napi_complete(&netcp->rx_napi); |
898 | knav_queue_enable_notify(netcp->rx_queue); | 909 | knav_queue_enable_notify(netcp->rx_queue); |
899 | } | 910 | } |
900 | 911 | ||
901 | netcp_rxpool_refill(netcp); | ||
902 | return packets; | 912 | return packets; |
903 | } | 913 | } |
904 | 914 | ||
@@ -1384,7 +1394,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp) | |||
1384 | continue; | 1394 | continue; |
1385 | dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", | 1395 | dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", |
1386 | naddr->addr, naddr->type); | 1396 | naddr->addr, naddr->type); |
1387 | mutex_lock(&netcp_modules_lock); | ||
1388 | for_each_module(netcp, priv) { | 1397 | for_each_module(netcp, priv) { |
1389 | module = priv->netcp_module; | 1398 | module = priv->netcp_module; |
1390 | if (!module->del_addr) | 1399 | if (!module->del_addr) |
@@ -1393,7 +1402,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp) | |||
1393 | naddr); | 1402 | naddr); |
1394 | WARN_ON(error); | 1403 | WARN_ON(error); |
1395 | } | 1404 | } |
1396 | mutex_unlock(&netcp_modules_lock); | ||
1397 | netcp_addr_del(netcp, naddr); | 1405 | netcp_addr_del(netcp, naddr); |
1398 | } | 1406 | } |
1399 | } | 1407 | } |
@@ -1410,7 +1418,7 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp) | |||
1410 | continue; | 1418 | continue; |
1411 | dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", | 1419 | dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", |
1412 | naddr->addr, naddr->type); | 1420 | naddr->addr, naddr->type); |
1413 | mutex_lock(&netcp_modules_lock); | 1421 | |
1414 | for_each_module(netcp, priv) { | 1422 | for_each_module(netcp, priv) { |
1415 | module = priv->netcp_module; | 1423 | module = priv->netcp_module; |
1416 | if (!module->add_addr) | 1424 | if (!module->add_addr) |
@@ -1418,7 +1426,6 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp) | |||
1418 | error = module->add_addr(priv->module_priv, naddr); | 1426 | error = module->add_addr(priv->module_priv, naddr); |
1419 | WARN_ON(error); | 1427 | WARN_ON(error); |
1420 | } | 1428 | } |
1421 | mutex_unlock(&netcp_modules_lock); | ||
1422 | } | 1429 | } |
1423 | } | 1430 | } |
1424 | 1431 | ||
@@ -1432,6 +1439,7 @@ static void netcp_set_rx_mode(struct net_device *ndev) | |||
1432 | ndev->flags & IFF_ALLMULTI || | 1439 | ndev->flags & IFF_ALLMULTI || |
1433 | netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); | 1440 | netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); |
1434 | 1441 | ||
1442 | spin_lock(&netcp->lock); | ||
1435 | /* first clear all marks */ | 1443 | /* first clear all marks */ |
1436 | netcp_addr_clear_mark(netcp); | 1444 | netcp_addr_clear_mark(netcp); |
1437 | 1445 | ||
@@ -1450,6 +1458,7 @@ static void netcp_set_rx_mode(struct net_device *ndev) | |||
1450 | /* finally sweep and callout into modules */ | 1458 | /* finally sweep and callout into modules */ |
1451 | netcp_addr_sweep_del(netcp); | 1459 | netcp_addr_sweep_del(netcp); |
1452 | netcp_addr_sweep_add(netcp); | 1460 | netcp_addr_sweep_add(netcp); |
1461 | spin_unlock(&netcp->lock); | ||
1453 | } | 1462 | } |
1454 | 1463 | ||
1455 | static void netcp_free_navigator_resources(struct netcp_intf *netcp) | 1464 | static void netcp_free_navigator_resources(struct netcp_intf *netcp) |
@@ -1614,7 +1623,6 @@ static int netcp_ndo_open(struct net_device *ndev) | |||
1614 | goto fail; | 1623 | goto fail; |
1615 | } | 1624 | } |
1616 | 1625 | ||
1617 | mutex_lock(&netcp_modules_lock); | ||
1618 | for_each_module(netcp, intf_modpriv) { | 1626 | for_each_module(netcp, intf_modpriv) { |
1619 | module = intf_modpriv->netcp_module; | 1627 | module = intf_modpriv->netcp_module; |
1620 | if (module->open) { | 1628 | if (module->open) { |
@@ -1625,7 +1633,6 @@ static int netcp_ndo_open(struct net_device *ndev) | |||
1625 | } | 1633 | } |
1626 | } | 1634 | } |
1627 | } | 1635 | } |
1628 | mutex_unlock(&netcp_modules_lock); | ||
1629 | 1636 | ||
1630 | napi_enable(&netcp->rx_napi); | 1637 | napi_enable(&netcp->rx_napi); |
1631 | napi_enable(&netcp->tx_napi); | 1638 | napi_enable(&netcp->tx_napi); |
@@ -1642,7 +1649,6 @@ fail_open: | |||
1642 | if (module->close) | 1649 | if (module->close) |
1643 | module->close(intf_modpriv->module_priv, ndev); | 1650 | module->close(intf_modpriv->module_priv, ndev); |
1644 | } | 1651 | } |
1645 | mutex_unlock(&netcp_modules_lock); | ||
1646 | 1652 | ||
1647 | fail: | 1653 | fail: |
1648 | netcp_free_navigator_resources(netcp); | 1654 | netcp_free_navigator_resources(netcp); |
@@ -1666,7 +1672,6 @@ static int netcp_ndo_stop(struct net_device *ndev) | |||
1666 | napi_disable(&netcp->rx_napi); | 1672 | napi_disable(&netcp->rx_napi); |
1667 | napi_disable(&netcp->tx_napi); | 1673 | napi_disable(&netcp->tx_napi); |
1668 | 1674 | ||
1669 | mutex_lock(&netcp_modules_lock); | ||
1670 | for_each_module(netcp, intf_modpriv) { | 1675 | for_each_module(netcp, intf_modpriv) { |
1671 | module = intf_modpriv->netcp_module; | 1676 | module = intf_modpriv->netcp_module; |
1672 | if (module->close) { | 1677 | if (module->close) { |
@@ -1675,7 +1680,6 @@ static int netcp_ndo_stop(struct net_device *ndev) | |||
1675 | dev_err(netcp->ndev_dev, "Close failed\n"); | 1680 | dev_err(netcp->ndev_dev, "Close failed\n"); |
1676 | } | 1681 | } |
1677 | } | 1682 | } |
1678 | mutex_unlock(&netcp_modules_lock); | ||
1679 | 1683 | ||
1680 | /* Recycle Rx descriptors from completion queue */ | 1684 | /* Recycle Rx descriptors from completion queue */ |
1681 | netcp_empty_rx_queue(netcp); | 1685 | netcp_empty_rx_queue(netcp); |
@@ -1703,7 +1707,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev, | |||
1703 | if (!netif_running(ndev)) | 1707 | if (!netif_running(ndev)) |
1704 | return -EINVAL; | 1708 | return -EINVAL; |
1705 | 1709 | ||
1706 | mutex_lock(&netcp_modules_lock); | ||
1707 | for_each_module(netcp, intf_modpriv) { | 1710 | for_each_module(netcp, intf_modpriv) { |
1708 | module = intf_modpriv->netcp_module; | 1711 | module = intf_modpriv->netcp_module; |
1709 | if (!module->ioctl) | 1712 | if (!module->ioctl) |
@@ -1719,7 +1722,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev, | |||
1719 | } | 1722 | } |
1720 | 1723 | ||
1721 | out: | 1724 | out: |
1722 | mutex_unlock(&netcp_modules_lock); | ||
1723 | return (ret == 0) ? 0 : err; | 1725 | return (ret == 0) ? 0 : err; |
1724 | } | 1726 | } |
1725 | 1727 | ||
@@ -1754,11 +1756,12 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) | |||
1754 | struct netcp_intf *netcp = netdev_priv(ndev); | 1756 | struct netcp_intf *netcp = netdev_priv(ndev); |
1755 | struct netcp_intf_modpriv *intf_modpriv; | 1757 | struct netcp_intf_modpriv *intf_modpriv; |
1756 | struct netcp_module *module; | 1758 | struct netcp_module *module; |
1759 | unsigned long flags; | ||
1757 | int err = 0; | 1760 | int err = 0; |
1758 | 1761 | ||
1759 | dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); | 1762 | dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); |
1760 | 1763 | ||
1761 | mutex_lock(&netcp_modules_lock); | 1764 | spin_lock_irqsave(&netcp->lock, flags); |
1762 | for_each_module(netcp, intf_modpriv) { | 1765 | for_each_module(netcp, intf_modpriv) { |
1763 | module = intf_modpriv->netcp_module; | 1766 | module = intf_modpriv->netcp_module; |
1764 | if ((module->add_vid) && (vid != 0)) { | 1767 | if ((module->add_vid) && (vid != 0)) { |
@@ -1770,7 +1773,8 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) | |||
1770 | } | 1773 | } |
1771 | } | 1774 | } |
1772 | } | 1775 | } |
1773 | mutex_unlock(&netcp_modules_lock); | 1776 | spin_unlock_irqrestore(&netcp->lock, flags); |
1777 | |||
1774 | return err; | 1778 | return err; |
1775 | } | 1779 | } |
1776 | 1780 | ||
@@ -1779,11 +1783,12 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) | |||
1779 | struct netcp_intf *netcp = netdev_priv(ndev); | 1783 | struct netcp_intf *netcp = netdev_priv(ndev); |
1780 | struct netcp_intf_modpriv *intf_modpriv; | 1784 | struct netcp_intf_modpriv *intf_modpriv; |
1781 | struct netcp_module *module; | 1785 | struct netcp_module *module; |
1786 | unsigned long flags; | ||
1782 | int err = 0; | 1787 | int err = 0; |
1783 | 1788 | ||
1784 | dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); | 1789 | dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); |
1785 | 1790 | ||
1786 | mutex_lock(&netcp_modules_lock); | 1791 | spin_lock_irqsave(&netcp->lock, flags); |
1787 | for_each_module(netcp, intf_modpriv) { | 1792 | for_each_module(netcp, intf_modpriv) { |
1788 | module = intf_modpriv->netcp_module; | 1793 | module = intf_modpriv->netcp_module; |
1789 | if (module->del_vid) { | 1794 | if (module->del_vid) { |
@@ -1795,7 +1800,7 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) | |||
1795 | } | 1800 | } |
1796 | } | 1801 | } |
1797 | } | 1802 | } |
1798 | mutex_unlock(&netcp_modules_lock); | 1803 | spin_unlock_irqrestore(&netcp->lock, flags); |
1799 | return err; | 1804 | return err; |
1800 | } | 1805 | } |
1801 | 1806 | ||
@@ -2040,7 +2045,6 @@ static int netcp_probe(struct platform_device *pdev) | |||
2040 | struct device_node *child, *interfaces; | 2045 | struct device_node *child, *interfaces; |
2041 | struct netcp_device *netcp_device; | 2046 | struct netcp_device *netcp_device; |
2042 | struct device *dev = &pdev->dev; | 2047 | struct device *dev = &pdev->dev; |
2043 | struct netcp_module *module; | ||
2044 | int ret; | 2048 | int ret; |
2045 | 2049 | ||
2046 | if (!node) { | 2050 | if (!node) { |
@@ -2087,14 +2091,6 @@ static int netcp_probe(struct platform_device *pdev) | |||
2087 | /* Add the device instance to the list */ | 2091 | /* Add the device instance to the list */ |
2088 | list_add_tail(&netcp_device->device_list, &netcp_devices); | 2092 | list_add_tail(&netcp_device->device_list, &netcp_devices); |
2089 | 2093 | ||
2090 | /* Probe & attach any modules already registered */ | ||
2091 | mutex_lock(&netcp_modules_lock); | ||
2092 | for_each_netcp_module(module) { | ||
2093 | ret = netcp_module_probe(netcp_device, module); | ||
2094 | if (ret < 0) | ||
2095 | dev_err(dev, "module(%s) probe failed\n", module->name); | ||
2096 | } | ||
2097 | mutex_unlock(&netcp_modules_lock); | ||
2098 | return 0; | 2094 | return 0; |
2099 | 2095 | ||
2100 | probe_quit_interface: | 2096 | probe_quit_interface: |
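
[Note] The netcp_core hunks bundle several robustness fixes: interfaces are registered with the networking core only after every module has had a chance to attach (and a missing phandle for a module now skips that interface instead of failing the whole probe); netcp_allocate_rx_buf() reports -ENOMEM so the refill loop stops rather than spinning when the descriptor pool is exhausted; the refill happens before napi_complete(), so notifications are re-enabled with buffers already queued; and the per-module callbacks reached from ndo_set_rx_mode and the VLAN add/kill paths are serialized with the interface spinlock instead of a mutex, since those callers may run in atomic context. The refill loop after the change looks like (sketch):

    int i, ret = 0;

    for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
            u32 deficit = netcp->rx_queue_depths[i] -
                          knav_queue_get_count(netcp->rx_fdq[i]);

            while (deficit-- && !ret)
                    ret = netcp_allocate_rx_buf(netcp, i);  /* stop on -ENOMEM */
    }
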
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 6f16d6aaf7b7..6bff8d82ceab 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c | |||
@@ -77,6 +77,7 @@ | |||
77 | #define GBENU_ALE_OFFSET 0x1e000 | 77 | #define GBENU_ALE_OFFSET 0x1e000 |
78 | #define GBENU_HOST_PORT_NUM 0 | 78 | #define GBENU_HOST_PORT_NUM 0 |
79 | #define GBENU_NUM_ALE_ENTRIES 1024 | 79 | #define GBENU_NUM_ALE_ENTRIES 1024 |
80 | #define GBENU_SGMII_MODULE_SIZE 0x100 | ||
80 | 81 | ||
81 | /* 10G Ethernet SS defines */ | 82 | /* 10G Ethernet SS defines */ |
82 | #define XGBE_MODULE_NAME "netcp-xgbe" | 83 | #define XGBE_MODULE_NAME "netcp-xgbe" |
@@ -149,8 +150,8 @@ | |||
149 | #define XGBE_STATS2_MODULE 2 | 150 | #define XGBE_STATS2_MODULE 2 |
150 | 151 | ||
151 | /* s: 0-based slave_port */ | 152 | /* s: 0-based slave_port */ |
152 | #define SGMII_BASE(s) \ | 153 | #define SGMII_BASE(d, s) \ |
153 | (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) | 154 | (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs) |
154 | 155 | ||
155 | #define GBE_TX_QUEUE 648 | 156 | #define GBE_TX_QUEUE 648 |
156 | #define GBE_TXHOOK_ORDER 0 | 157 | #define GBE_TXHOOK_ORDER 0 |
@@ -1997,13 +1998,8 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, | |||
1997 | return; | 1998 | return; |
1998 | 1999 | ||
1999 | if (!SLAVE_LINK_IS_XGMII(slave)) { | 2000 | if (!SLAVE_LINK_IS_XGMII(slave)) { |
2000 | if (gbe_dev->ss_version == GBE_SS_VERSION_14) | 2001 | sgmii_link_state = |
2001 | sgmii_link_state = | 2002 | netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp); |
2002 | netcp_sgmii_get_port_link(SGMII_BASE(sp), sp); | ||
2003 | else | ||
2004 | sgmii_link_state = | ||
2005 | netcp_sgmii_get_port_link( | ||
2006 | gbe_dev->sgmii_port_regs, sp); | ||
2007 | } | 2003 | } |
2008 | 2004 | ||
2009 | phy_link_state = gbe_phy_link_status(slave); | 2005 | phy_link_state = gbe_phy_link_status(slave); |
@@ -2100,17 +2096,11 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, | |||
2100 | static void gbe_sgmii_rtreset(struct gbe_priv *priv, | 2096 | static void gbe_sgmii_rtreset(struct gbe_priv *priv, |
2101 | struct gbe_slave *slave, bool set) | 2097 | struct gbe_slave *slave, bool set) |
2102 | { | 2098 | { |
2103 | void __iomem *sgmii_port_regs; | ||
2104 | |||
2105 | if (SLAVE_LINK_IS_XGMII(slave)) | 2099 | if (SLAVE_LINK_IS_XGMII(slave)) |
2106 | return; | 2100 | return; |
2107 | 2101 | ||
2108 | if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) | 2102 | netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num), |
2109 | sgmii_port_regs = priv->sgmii_port34_regs; | 2103 | slave->slave_num, set); |
2110 | else | ||
2111 | sgmii_port_regs = priv->sgmii_port_regs; | ||
2112 | |||
2113 | netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set); | ||
2114 | } | 2104 | } |
2115 | 2105 | ||
2116 | static void gbe_slave_stop(struct gbe_intf *intf) | 2106 | static void gbe_slave_stop(struct gbe_intf *intf) |
@@ -2136,17 +2126,12 @@ static void gbe_slave_stop(struct gbe_intf *intf) | |||
2136 | 2126 | ||
2137 | static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) | 2127 | static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) |
2138 | { | 2128 | { |
2139 | void __iomem *sgmii_port_regs; | 2129 | if (SLAVE_LINK_IS_XGMII(slave)) |
2140 | 2130 | return; | |
2141 | sgmii_port_regs = priv->sgmii_port_regs; | ||
2142 | if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) | ||
2143 | sgmii_port_regs = priv->sgmii_port34_regs; | ||
2144 | 2131 | ||
2145 | if (!SLAVE_LINK_IS_XGMII(slave)) { | 2132 | netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num); |
2146 | netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); | 2133 | netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num, |
2147 | netcp_sgmii_config(sgmii_port_regs, slave->slave_num, | 2134 | slave->link_interface); |
2148 | slave->link_interface); | ||
2149 | } | ||
2150 | } | 2135 | } |
2151 | 2136 | ||
2152 | static int gbe_slave_open(struct gbe_intf *gbe_intf) | 2137 | static int gbe_slave_open(struct gbe_intf *gbe_intf) |
@@ -2997,6 +2982,14 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, | |||
2997 | gbe_dev->switch_regs = regs; | 2982 | gbe_dev->switch_regs = regs; |
2998 | 2983 | ||
2999 | gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; | 2984 | gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; |
2985 | |||
2986 | /* Although sgmii modules are mem mapped to one contiguous | ||
2987 | * region on GBENU devices, setting sgmii_port34_regs allows | ||
2988 | * consistent code when accessing sgmii api | ||
2989 | */ | ||
2990 | gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs + | ||
2991 | (2 * GBENU_SGMII_MODULE_SIZE); | ||
2992 | |||
3000 | gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; | 2993 | gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; |
3001 | 2994 | ||
3002 | for (i = 0; i < (gbe_dev->max_num_ports); i++) | 2995 | for (i = 0; i < (gbe_dev->max_num_ports); i++) |
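
[Note] Parameterizing SGMII_BASE() with the device pointer lets the GBE_SS_VERSION_14 and newer GBENU paths share one expression for "which SGMII register block serves slave s", which is what allows the open-coded version checks above to collapse. Pointing sgmii_port34_regs into the contiguous GBENU block keeps that expression valid there as well (sketch, fields as in the driver):

    #define SGMII_BASE(d, s) \
            (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)

    /* GBENU: contiguous SGMII modules, ports 3/4 are just an offset */
    gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
                                 2 * GBENU_SGMII_MODULE_SIZE;
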
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig index 2f1264b882b9..d3d094742a7e 100644 --- a/drivers/net/ethernet/via/Kconfig +++ b/drivers/net/ethernet/via/Kconfig | |||
@@ -17,7 +17,7 @@ if NET_VENDOR_VIA | |||
17 | 17 | ||
18 | config VIA_RHINE | 18 | config VIA_RHINE |
19 | tristate "VIA Rhine support" | 19 | tristate "VIA Rhine support" |
20 | depends on (PCI || OF_IRQ) | 20 | depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP) |
21 | depends on HAS_DMA | 21 | depends on HAS_DMA |
22 | select CRC32 | 22 | select CRC32 |
23 | select MII | 23 | select MII |
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 6008eee01a33..cf468c87ce57 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c | |||
@@ -828,6 +828,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) | |||
828 | if (!phydev) | 828 | if (!phydev) |
829 | dev_info(dev, | 829 | dev_info(dev, |
830 | "MDIO of the phy is not registered yet\n"); | 830 | "MDIO of the phy is not registered yet\n"); |
831 | else | ||
832 | put_device(&phydev->dev); | ||
831 | return 0; | 833 | return 0; |
832 | } | 834 | } |
833 | 835 | ||
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c index b5f4a78da828..2d3848c9dc35 100644 --- a/drivers/net/fjes/fjes_hw.c +++ b/drivers/net/fjes/fjes_hw.c | |||
@@ -1011,11 +1011,11 @@ static void fjes_hw_update_zone_task(struct work_struct *work) | |||
1011 | set_bit(epidx, &irq_bit); | 1011 | set_bit(epidx, &irq_bit); |
1012 | break; | 1012 | break; |
1013 | } | 1013 | } |
1014 | } | ||
1015 | |||
1016 | hw->ep_shm_info[epidx].es_status = info[epidx].es_status; | ||
1017 | hw->ep_shm_info[epidx].zone = info[epidx].zone; | ||
1018 | 1014 | ||
1015 | hw->ep_shm_info[epidx].es_status = | ||
1016 | info[epidx].es_status; | ||
1017 | hw->ep_shm_info[epidx].zone = info[epidx].zone; | ||
1018 | } | ||
1019 | break; | 1019 | break; |
1020 | } | 1020 | } |
1021 | 1021 | ||
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index da3259ce7c8d..8f5c02eed47d 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -126,6 +126,8 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) | |||
126 | __be32 addr; | 126 | __be32 addr; |
127 | int err; | 127 | int err; |
128 | 128 | ||
129 | iph = ip_hdr(skb); /* outer IP header... */ | ||
130 | |||
129 | if (gs->collect_md) { | 131 | if (gs->collect_md) { |
130 | static u8 zero_vni[3]; | 132 | static u8 zero_vni[3]; |
131 | 133 | ||
@@ -133,7 +135,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) | |||
133 | addr = 0; | 135 | addr = 0; |
134 | } else { | 136 | } else { |
135 | vni = gnvh->vni; | 137 | vni = gnvh->vni; |
136 | iph = ip_hdr(skb); /* Still outer IP header... */ | ||
137 | addr = iph->saddr; | 138 | addr = iph->saddr; |
138 | } | 139 | } |
139 | 140 | ||
@@ -178,7 +179,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) | |||
178 | 179 | ||
179 | skb_reset_network_header(skb); | 180 | skb_reset_network_header(skb); |
180 | 181 | ||
181 | iph = ip_hdr(skb); /* Now inner IP header... */ | ||
182 | err = IP_ECN_decapsulate(iph, skb); | 182 | err = IP_ECN_decapsulate(iph, skb); |
183 | 183 | ||
184 | if (unlikely(err)) { | 184 | if (unlikely(err)) { |
@@ -626,6 +626,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) | |||
626 | struct geneve_sock *gs = geneve->sock; | 626 | struct geneve_sock *gs = geneve->sock; |
627 | struct ip_tunnel_info *info = NULL; | 627 | struct ip_tunnel_info *info = NULL; |
628 | struct rtable *rt = NULL; | 628 | struct rtable *rt = NULL; |
629 | const struct iphdr *iip; /* interior IP header */ | ||
629 | struct flowi4 fl4; | 630 | struct flowi4 fl4; |
630 | __u8 tos, ttl; | 631 | __u8 tos, ttl; |
631 | __be16 sport; | 632 | __be16 sport; |
@@ -653,6 +654,8 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) | |||
653 | sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); | 654 | sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); |
654 | skb_reset_mac_header(skb); | 655 | skb_reset_mac_header(skb); |
655 | 656 | ||
657 | iip = ip_hdr(skb); | ||
658 | |||
656 | if (info) { | 659 | if (info) { |
657 | const struct ip_tunnel_key *key = &info->key; | 660 | const struct ip_tunnel_key *key = &info->key; |
658 | u8 *opts = NULL; | 661 | u8 *opts = NULL; |
@@ -668,19 +671,16 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) | |||
668 | if (unlikely(err)) | 671 | if (unlikely(err)) |
669 | goto err; | 672 | goto err; |
670 | 673 | ||
671 | tos = key->tos; | 674 | tos = ip_tunnel_ecn_encap(key->tos, iip, skb); |
672 | ttl = key->ttl; | 675 | ttl = key->ttl; |
673 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; | 676 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; |
674 | } else { | 677 | } else { |
675 | const struct iphdr *iip; /* interior IP header */ | ||
676 | |||
677 | udp_csum = false; | 678 | udp_csum = false; |
678 | err = geneve_build_skb(rt, skb, 0, geneve->vni, | 679 | err = geneve_build_skb(rt, skb, 0, geneve->vni, |
679 | 0, NULL, udp_csum); | 680 | 0, NULL, udp_csum); |
680 | if (unlikely(err)) | 681 | if (unlikely(err)) |
681 | goto err; | 682 | goto err; |
682 | 683 | ||
683 | iip = ip_hdr(skb); | ||
684 | tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); | 684 | tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); |
685 | ttl = geneve->ttl; | 685 | ttl = geneve->ttl; |
686 | if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) | 686 | if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) |
@@ -748,12 +748,8 @@ static void geneve_setup(struct net_device *dev) | |||
748 | dev->features |= NETIF_F_RXCSUM; | 748 | dev->features |= NETIF_F_RXCSUM; |
749 | dev->features |= NETIF_F_GSO_SOFTWARE; | 749 | dev->features |= NETIF_F_GSO_SOFTWARE; |
750 | 750 | ||
751 | dev->vlan_features = dev->features; | ||
752 | dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; | ||
753 | |||
754 | dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; | 751 | dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; |
755 | dev->hw_features |= NETIF_F_GSO_SOFTWARE; | 752 | dev->hw_features |= NETIF_F_GSO_SOFTWARE; |
756 | dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; | ||
757 | 753 | ||
758 | netif_keep_dst(dev); | 754 | netif_keep_dst(dev); |
759 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; | 755 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; |
@@ -819,7 +815,7 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn, | |||
819 | 815 | ||
820 | static int geneve_configure(struct net *net, struct net_device *dev, | 816 | static int geneve_configure(struct net *net, struct net_device *dev, |
821 | __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos, | 817 | __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos, |
822 | __u16 dst_port, bool metadata) | 818 | __be16 dst_port, bool metadata) |
823 | { | 819 | { |
824 | struct geneve_net *gn = net_generic(net, geneve_net_id); | 820 | struct geneve_net *gn = net_generic(net, geneve_net_id); |
825 | struct geneve_dev *t, *geneve = netdev_priv(dev); | 821 | struct geneve_dev *t, *geneve = netdev_priv(dev); |
@@ -844,10 +840,10 @@ static int geneve_configure(struct net *net, struct net_device *dev, | |||
844 | 840 | ||
845 | geneve->ttl = ttl; | 841 | geneve->ttl = ttl; |
846 | geneve->tos = tos; | 842 | geneve->tos = tos; |
847 | geneve->dst_port = htons(dst_port); | 843 | geneve->dst_port = dst_port; |
848 | geneve->collect_md = metadata; | 844 | geneve->collect_md = metadata; |
849 | 845 | ||
850 | t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni, | 846 | t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni, |
851 | &tun_on_same_port, &tun_collect_md); | 847 | &tun_on_same_port, &tun_collect_md); |
852 | if (t) | 848 | if (t) |
853 | return -EBUSY; | 849 | return -EBUSY; |
@@ -871,7 +867,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, | |||
871 | static int geneve_newlink(struct net *net, struct net_device *dev, | 867 | static int geneve_newlink(struct net *net, struct net_device *dev, |
872 | struct nlattr *tb[], struct nlattr *data[]) | 868 | struct nlattr *tb[], struct nlattr *data[]) |
873 | { | 869 | { |
874 | __u16 dst_port = GENEVE_UDP_PORT; | 870 | __be16 dst_port = htons(GENEVE_UDP_PORT); |
875 | __u8 ttl = 0, tos = 0; | 871 | __u8 ttl = 0, tos = 0; |
876 | bool metadata = false; | 872 | bool metadata = false; |
877 | __be32 rem_addr; | 873 | __be32 rem_addr; |
@@ -890,7 +886,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev, | |||
890 | tos = nla_get_u8(data[IFLA_GENEVE_TOS]); | 886 | tos = nla_get_u8(data[IFLA_GENEVE_TOS]); |
891 | 887 | ||
892 | if (data[IFLA_GENEVE_PORT]) | 888 | if (data[IFLA_GENEVE_PORT]) |
893 | dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]); | 889 | dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]); |
894 | 890 | ||
895 | if (data[IFLA_GENEVE_COLLECT_METADATA]) | 891 | if (data[IFLA_GENEVE_COLLECT_METADATA]) |
896 | metadata = true; | 892 | metadata = true; |
@@ -913,7 +909,7 @@ static size_t geneve_get_size(const struct net_device *dev) | |||
913 | nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ | 909 | nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ |
914 | nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ | 910 | nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ |
915 | nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ | 911 | nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ |
916 | nla_total_size(sizeof(__u16)) + /* IFLA_GENEVE_PORT */ | 912 | nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */ |
917 | nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ | 913 | nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ |
918 | 0; | 914 | 0; |
919 | } | 915 | } |
@@ -935,7 +931,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
935 | nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) | 931 | nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) |
936 | goto nla_put_failure; | 932 | goto nla_put_failure; |
937 | 933 | ||
938 | if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port))) | 934 | if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port)) |
939 | goto nla_put_failure; | 935 | goto nla_put_failure; |
940 | 936 | ||
941 | if (geneve->collect_md) { | 937 | if (geneve->collect_md) { |
@@ -975,7 +971,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, | |||
975 | if (IS_ERR(dev)) | 971 | if (IS_ERR(dev)) |
976 | return dev; | 972 | return dev; |
977 | 973 | ||
978 | err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true); | 974 | err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true); |
979 | if (err) { | 975 | if (err) { |
980 | free_netdev(dev); | 976 | free_netdev(dev); |
981 | return ERR_PTR(err); | 977 | return ERR_PTR(err); |
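
Note on the geneve hunks above: storing dst_port as __be16 keeps the UDP port in network byte order all the way from netlink parsing to the fill_info path, so the htons()/ntohs() calls disappear from the use sites. A minimal sketch of that pattern, using placeholder "foo" names rather than the actual geneve symbols:

	#include <net/netlink.h>

	/* Illustrative only: struct foo_dev and the foo_* helpers are placeholders. */
	struct foo_dev {
		__be16 dst_port;			/* kept in network byte order */
	};

	static void foo_parse_port(struct foo_dev *foo, const struct nlattr *port_attr)
	{
		foo->dst_port = htons(6081);		/* default geneve port, converted once */
		if (port_attr)
			foo->dst_port = nla_get_be16(port_attr);	/* already __be16 */
	}

	static int foo_fill_port(struct sk_buff *skb, int attrtype,
				 const struct foo_dev *foo)
	{
		return nla_put_be16(skb, attrtype, foo->dst_port);	/* no ntohs() needed */
	}
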
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index 58ae11a14bb6..64bb44d5d867 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c | |||
@@ -1031,7 +1031,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) | |||
1031 | static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) | 1031 | static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) |
1032 | { | 1032 | { |
1033 | struct ali_ircc_cb *self = priv; | 1033 | struct ali_ircc_cb *self = priv; |
1034 | unsigned long flags; | ||
1035 | int iobase; | 1034 | int iobase; |
1036 | int fcr; /* FIFO control reg */ | 1035 | int fcr; /* FIFO control reg */ |
1037 | int lcr; /* Line control reg */ | 1036 | int lcr; /* Line control reg */ |
@@ -1061,8 +1060,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) | |||
1061 | /* Update accounting for new speed */ | 1060 | /* Update accounting for new speed */ |
1062 | self->io.speed = speed; | 1061 | self->io.speed = speed; |
1063 | 1062 | ||
1064 | spin_lock_irqsave(&self->lock, flags); | ||
1065 | |||
1066 | divisor = 115200/speed; | 1063 | divisor = 115200/speed; |
1067 | 1064 | ||
1068 | fcr = UART_FCR_ENABLE_FIFO; | 1065 | fcr = UART_FCR_ENABLE_FIFO; |
@@ -1089,9 +1086,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) | |||
1089 | /* without this, the connection will be broken after coming back from FIR speed, | 1086 | /* without this, the connection will be broken after coming back from FIR speed, |
1090 | but with this, the SIR connection is harder to establish */ | 1087 | but with this, the SIR connection is harder to establish */ |
1091 | outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR); | 1088 | outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR); |
1092 | |||
1093 | spin_unlock_irqrestore(&self->lock, flags); | ||
1094 | |||
1095 | } | 1089 | } |
1096 | 1090 | ||
1097 | static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) | 1091 | static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index edd77342773a..248478c6f6e4 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -1111,10 +1111,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, | |||
1111 | return 0; | 1111 | return 0; |
1112 | 1112 | ||
1113 | case TUNSETSNDBUF: | 1113 | case TUNSETSNDBUF: |
1114 | if (get_user(u, up)) | 1114 | if (get_user(s, sp)) |
1115 | return -EFAULT; | 1115 | return -EFAULT; |
1116 | 1116 | ||
1117 | q->sk.sk_sndbuf = u; | 1117 | q->sk.sk_sndbuf = s; |
1118 | return 0; | 1118 | return 0; |
1119 | 1119 | ||
1120 | case TUNGETVNETHDRSZ: | 1120 | case TUNGETVNETHDRSZ: |
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index fb1299c6326e..e23bf5b90e17 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c | |||
@@ -220,7 +220,7 @@ int fixed_phy_update_state(struct phy_device *phydev, | |||
220 | struct fixed_mdio_bus *fmb = &platform_fmb; | 220 | struct fixed_mdio_bus *fmb = &platform_fmb; |
221 | struct fixed_phy *fp; | 221 | struct fixed_phy *fp; |
222 | 222 | ||
223 | if (!phydev || !phydev->bus) | 223 | if (!phydev || phydev->bus != fmb->mii_bus) |
224 | return -EINVAL; | 224 | return -EINVAL; |
225 | 225 | ||
226 | list_for_each_entry(fp, &fmb->phys, node) { | 226 | list_for_each_entry(fp, &fmb->phys, node) { |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index e6897b6a8a53..5de8d5827536 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -785,6 +785,7 @@ static int marvell_read_status(struct phy_device *phydev) | |||
785 | int adv; | 785 | int adv; |
786 | int err; | 786 | int err; |
787 | int lpa; | 787 | int lpa; |
788 | int lpagb; | ||
788 | int status = 0; | 789 | int status = 0; |
789 | 790 | ||
790 | /* Update the link, but return if there | 791 | /* Update the link, but return if there |
@@ -802,10 +803,17 @@ static int marvell_read_status(struct phy_device *phydev) | |||
802 | if (lpa < 0) | 803 | if (lpa < 0) |
803 | return lpa; | 804 | return lpa; |
804 | 805 | ||
806 | lpagb = phy_read(phydev, MII_STAT1000); | ||
807 | if (lpagb < 0) | ||
808 | return lpagb; | ||
809 | |||
805 | adv = phy_read(phydev, MII_ADVERTISE); | 810 | adv = phy_read(phydev, MII_ADVERTISE); |
806 | if (adv < 0) | 811 | if (adv < 0) |
807 | return adv; | 812 | return adv; |
808 | 813 | ||
814 | phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) | | ||
815 | mii_lpa_to_ethtool_lpa_t(lpa); | ||
816 | |||
809 | lpa &= adv; | 817 | lpa &= adv; |
810 | 818 | ||
811 | if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) | 819 | if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) |
@@ -853,6 +861,7 @@ static int marvell_read_status(struct phy_device *phydev) | |||
853 | phydev->speed = SPEED_10; | 861 | phydev->speed = SPEED_10; |
854 | 862 | ||
855 | phydev->pause = phydev->asym_pause = 0; | 863 | phydev->pause = phydev->asym_pause = 0; |
864 | phydev->lp_advertising = 0; | ||
856 | } | 865 | } |
857 | 866 | ||
858 | return 0; | 867 | return 0; |
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 6a52a7f0fa0d..4bde5e728fe0 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c | |||
@@ -244,6 +244,7 @@ static const struct of_device_id unimac_mdio_ids[] = { | |||
244 | { .compatible = "brcm,unimac-mdio", }, | 244 | { .compatible = "brcm,unimac-mdio", }, |
245 | { /* sentinel */ }, | 245 | { /* sentinel */ }, |
246 | }; | 246 | }; |
247 | MODULE_DEVICE_TABLE(of, unimac_mdio_ids); | ||
247 | 248 | ||
248 | static struct platform_driver unimac_mdio_driver = { | 249 | static struct platform_driver unimac_mdio_driver = { |
249 | .driver = { | 250 | .driver = { |
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 7dc21e56a7aa..3bc9f03349f3 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c | |||
@@ -261,6 +261,7 @@ static const struct of_device_id mdio_gpio_of_match[] = { | |||
261 | { .compatible = "virtual,mdio-gpio", }, | 261 | { .compatible = "virtual,mdio-gpio", }, |
262 | { /* sentinel */ } | 262 | { /* sentinel */ } |
263 | }; | 263 | }; |
264 | MODULE_DEVICE_TABLE(of, mdio_gpio_of_match); | ||
264 | 265 | ||
265 | static struct platform_driver mdio_gpio_driver = { | 266 | static struct platform_driver mdio_gpio_driver = { |
266 | .probe = mdio_gpio_probe, | 267 | .probe = mdio_gpio_probe, |
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 4d4d25efc1e1..280c7c311f72 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c | |||
@@ -113,18 +113,18 @@ int mdio_mux_init(struct device *dev, | |||
113 | if (!parent_bus_node) | 113 | if (!parent_bus_node) |
114 | return -ENODEV; | 114 | return -ENODEV; |
115 | 115 | ||
116 | parent_bus = of_mdio_find_bus(parent_bus_node); | ||
117 | if (parent_bus == NULL) { | ||
118 | ret_val = -EPROBE_DEFER; | ||
119 | goto err_parent_bus; | ||
120 | } | ||
121 | |||
122 | pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); | 116 | pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); |
123 | if (pb == NULL) { | 117 | if (pb == NULL) { |
124 | ret_val = -ENOMEM; | 118 | ret_val = -ENOMEM; |
125 | goto err_parent_bus; | 119 | goto err_parent_bus; |
126 | } | 120 | } |
127 | 121 | ||
122 | parent_bus = of_mdio_find_bus(parent_bus_node); | ||
123 | if (parent_bus == NULL) { | ||
124 | ret_val = -EPROBE_DEFER; | ||
125 | goto err_parent_bus; | ||
126 | } | ||
127 | |||
128 | pb->switch_data = data; | 128 | pb->switch_data = data; |
129 | pb->switch_fn = switch_fn; | 129 | pb->switch_fn = switch_fn; |
130 | pb->current_child = -1; | 130 | pb->current_child = -1; |
@@ -173,6 +173,10 @@ int mdio_mux_init(struct device *dev, | |||
173 | dev_info(dev, "Version " DRV_VERSION "\n"); | 173 | dev_info(dev, "Version " DRV_VERSION "\n"); |
174 | return 0; | 174 | return 0; |
175 | } | 175 | } |
176 | |||
177 | /* balance the reference of_mdio_find_bus() took */ | ||
178 | put_device(&pb->mii_bus->dev); | ||
179 | |||
176 | err_parent_bus: | 180 | err_parent_bus: |
177 | of_node_put(parent_bus_node); | 181 | of_node_put(parent_bus_node); |
178 | return ret_val; | 182 | return ret_val; |
@@ -189,6 +193,9 @@ void mdio_mux_uninit(void *mux_handle) | |||
189 | mdiobus_free(cb->mii_bus); | 193 | mdiobus_free(cb->mii_bus); |
190 | cb = cb->next; | 194 | cb = cb->next; |
191 | } | 195 | } |
196 | |||
197 | /* balance the reference of_mdio_find_bus() in mdio_mux_init() took */ | ||
198 | put_device(&pb->mii_bus->dev); | ||
192 | } | 199 | } |
193 | EXPORT_SYMBOL_GPL(mdio_mux_uninit); | 200 | EXPORT_SYMBOL_GPL(mdio_mux_uninit); |
194 | 201 | ||
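
The mdio-mux change above moves the of_mdio_find_bus() lookup after the allocation and adds the put_device() calls needed to balance the reference that lookup now takes, both on the probe error path and in mdio_mux_uninit(). A hedged sketch of the lookup/release pairing a caller is expected to follow:

	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	static int example_use_parent_bus(struct device_node *parent_bus_node)
	{
		struct mii_bus *bus;

		bus = of_mdio_find_bus(parent_bus_node);	/* takes a struct device reference */
		if (!bus)
			return -EPROBE_DEFER;			/* parent bus not registered yet */

		/* ... use the bus, e.g. mdiobus_read()/mdiobus_write() ... */

		put_device(&bus->dev);				/* balance the reference when done */
		return 0;
	}
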
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 02a4615b65f8..12f44c53cc8e 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -167,7 +167,9 @@ static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np) | |||
167 | * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. | 167 | * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. |
168 | * @mdio_bus_np: Pointer to the mii_bus's device_node. | 168 | * @mdio_bus_np: Pointer to the mii_bus's device_node. |
169 | * | 169 | * |
170 | * Returns a pointer to the mii_bus, or NULL if none found. | 170 | * Returns a reference to the mii_bus, or NULL if none found. The |
171 | * embedded struct device will have its reference count incremented, | ||
172 | * and this must be put once the bus is finished with. | ||
171 | * | 173 | * |
172 | * Because the association of a device_node and mii_bus is made via | 174 | * Because the association of a device_node and mii_bus is made via |
173 | * of_mdiobus_register(), the mii_bus cannot be found before it is | 175 | * of_mdiobus_register(), the mii_bus cannot be found before it is |
@@ -234,15 +236,18 @@ static inline void of_mdiobus_link_phydev(struct mii_bus *mdio, | |||
234 | #endif | 236 | #endif |
235 | 237 | ||
236 | /** | 238 | /** |
237 | * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus | 239 | * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus |
238 | * @bus: target mii_bus | 240 | * @bus: target mii_bus |
241 | * @owner: module containing bus accessor functions | ||
239 | * | 242 | * |
240 | * Description: Called by a bus driver to bring up all the PHYs | 243 | * Description: Called by a bus driver to bring up all the PHYs |
241 | * on a given bus, and attach them to the bus. | 244 | * on a given bus, and attach them to the bus. Drivers should use |
245 | * mdiobus_register() rather than __mdiobus_register() unless they | ||
246 | * need to pass a specific owner module. | ||
242 | * | 247 | * |
243 | * Returns 0 on success or < 0 on error. | 248 | * Returns 0 on success or < 0 on error. |
244 | */ | 249 | */ |
245 | int mdiobus_register(struct mii_bus *bus) | 250 | int __mdiobus_register(struct mii_bus *bus, struct module *owner) |
246 | { | 251 | { |
247 | int i, err; | 252 | int i, err; |
248 | 253 | ||
@@ -253,6 +258,7 @@ int mdiobus_register(struct mii_bus *bus) | |||
253 | BUG_ON(bus->state != MDIOBUS_ALLOCATED && | 258 | BUG_ON(bus->state != MDIOBUS_ALLOCATED && |
254 | bus->state != MDIOBUS_UNREGISTERED); | 259 | bus->state != MDIOBUS_UNREGISTERED); |
255 | 260 | ||
261 | bus->owner = owner; | ||
256 | bus->dev.parent = bus->parent; | 262 | bus->dev.parent = bus->parent; |
257 | bus->dev.class = &mdio_bus_class; | 263 | bus->dev.class = &mdio_bus_class; |
258 | bus->dev.groups = NULL; | 264 | bus->dev.groups = NULL; |
@@ -288,13 +294,16 @@ int mdiobus_register(struct mii_bus *bus) | |||
288 | 294 | ||
289 | error: | 295 | error: |
290 | while (--i >= 0) { | 296 | while (--i >= 0) { |
291 | if (bus->phy_map[i]) | 297 | struct phy_device *phydev = bus->phy_map[i]; |
292 | device_unregister(&bus->phy_map[i]->dev); | 298 | if (phydev) { |
299 | phy_device_remove(phydev); | ||
300 | phy_device_free(phydev); | ||
301 | } | ||
293 | } | 302 | } |
294 | device_del(&bus->dev); | 303 | device_del(&bus->dev); |
295 | return err; | 304 | return err; |
296 | } | 305 | } |
297 | EXPORT_SYMBOL(mdiobus_register); | 306 | EXPORT_SYMBOL(__mdiobus_register); |
298 | 307 | ||
299 | void mdiobus_unregister(struct mii_bus *bus) | 308 | void mdiobus_unregister(struct mii_bus *bus) |
300 | { | 309 | { |
@@ -304,9 +313,11 @@ void mdiobus_unregister(struct mii_bus *bus) | |||
304 | bus->state = MDIOBUS_UNREGISTERED; | 313 | bus->state = MDIOBUS_UNREGISTERED; |
305 | 314 | ||
306 | for (i = 0; i < PHY_MAX_ADDR; i++) { | 315 | for (i = 0; i < PHY_MAX_ADDR; i++) { |
307 | if (bus->phy_map[i]) | 316 | struct phy_device *phydev = bus->phy_map[i]; |
308 | device_unregister(&bus->phy_map[i]->dev); | 317 | if (phydev) { |
309 | bus->phy_map[i] = NULL; | 318 | phy_device_remove(phydev); |
319 | phy_device_free(phydev); | ||
320 | } | ||
310 | } | 321 | } |
311 | device_del(&bus->dev); | 322 | device_del(&bus->dev); |
312 | } | 323 | } |
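
With the mdio_bus.c change above, bus registration goes through __mdiobus_register() with an explicit owner module, which is what later lets phy_attach_direct() pin the bus driver in memory. Existing drivers keep calling mdiobus_register(); presumably that wrapper now simply passes THIS_MODULE, so a registration sketch (an assumption about the header, not a quote of the patch) looks like:

	#include <linux/module.h>
	#include <linux/phy.h>

	static int example_register(struct mii_bus *bus)
	{
		/* bus->name, bus->id, bus->read and bus->write are filled in beforehand.
		 * The owner recorded here is what phy_attach_direct() pins with
		 * try_module_get() while a PHY on this bus is attached. */
		return __mdiobus_register(bus, THIS_MODULE);
	}
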
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c0f211127274..f761288abe66 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -384,6 +384,24 @@ int phy_device_register(struct phy_device *phydev) | |||
384 | EXPORT_SYMBOL(phy_device_register); | 384 | EXPORT_SYMBOL(phy_device_register); |
385 | 385 | ||
386 | /** | 386 | /** |
387 | * phy_device_remove - Remove a previously registered phy device from the MDIO bus | ||
388 | * @phydev: phy_device structure to remove | ||
389 | * | ||
390 | * This doesn't free the phy_device itself; it merely reverses the effects | ||
391 | * of phy_device_register(). Use phy_device_free() to free the device | ||
392 | * after calling this function. | ||
393 | */ | ||
394 | void phy_device_remove(struct phy_device *phydev) | ||
395 | { | ||
396 | struct mii_bus *bus = phydev->bus; | ||
397 | int addr = phydev->addr; | ||
398 | |||
399 | device_del(&phydev->dev); | ||
400 | bus->phy_map[addr] = NULL; | ||
401 | } | ||
402 | EXPORT_SYMBOL(phy_device_remove); | ||
403 | |||
404 | /** | ||
387 | * phy_find_first - finds the first PHY device on the bus | 405 | * phy_find_first - finds the first PHY device on the bus |
388 | * @bus: the target MII bus | 406 | * @bus: the target MII bus |
389 | */ | 407 | */ |
@@ -578,14 +596,22 @@ EXPORT_SYMBOL(phy_init_hw); | |||
578 | * generic driver is used. The phy_device is given a ptr to | 596 | * generic driver is used. The phy_device is given a ptr to |
579 | * the attaching device, and given a callback for link status | 597 | * the attaching device, and given a callback for link status |
580 | * change. The phy_device is returned to the attaching driver. | 598 | * change. The phy_device is returned to the attaching driver. |
599 | * This function takes a reference on the phy device. | ||
581 | */ | 600 | */ |
582 | int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | 601 | int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, |
583 | u32 flags, phy_interface_t interface) | 602 | u32 flags, phy_interface_t interface) |
584 | { | 603 | { |
604 | struct mii_bus *bus = phydev->bus; | ||
585 | struct device *d = &phydev->dev; | 605 | struct device *d = &phydev->dev; |
586 | struct module *bus_module; | ||
587 | int err; | 606 | int err; |
588 | 607 | ||
608 | if (!try_module_get(bus->owner)) { | ||
609 | dev_err(&dev->dev, "failed to get the bus module\n"); | ||
610 | return -EIO; | ||
611 | } | ||
612 | |||
613 | get_device(d); | ||
614 | |||
589 | /* Assume that if there is no driver, that it doesn't | 615 | /* Assume that if there is no driver, that it doesn't |
590 | * exist, and we should use the genphy driver. | 616 | * exist, and we should use the genphy driver. |
591 | */ | 617 | */ |
@@ -600,20 +626,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
600 | err = device_bind_driver(d); | 626 | err = device_bind_driver(d); |
601 | 627 | ||
602 | if (err) | 628 | if (err) |
603 | return err; | 629 | goto error; |
604 | } | 630 | } |
605 | 631 | ||
606 | if (phydev->attached_dev) { | 632 | if (phydev->attached_dev) { |
607 | dev_err(&dev->dev, "PHY already attached\n"); | 633 | dev_err(&dev->dev, "PHY already attached\n"); |
608 | return -EBUSY; | 634 | err = -EBUSY; |
609 | } | 635 | goto error; |
610 | |||
611 | /* Increment the bus module reference count */ | ||
612 | bus_module = phydev->bus->dev.driver ? | ||
613 | phydev->bus->dev.driver->owner : NULL; | ||
614 | if (!try_module_get(bus_module)) { | ||
615 | dev_err(&dev->dev, "failed to get the bus module\n"); | ||
616 | return -EIO; | ||
617 | } | 636 | } |
618 | 637 | ||
619 | phydev->attached_dev = dev; | 638 | phydev->attached_dev = dev; |
@@ -636,6 +655,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
636 | phy_resume(phydev); | 655 | phy_resume(phydev); |
637 | 656 | ||
638 | return err; | 657 | return err; |
658 | |||
659 | error: | ||
660 | put_device(d); | ||
661 | module_put(bus->owner); | ||
662 | return err; | ||
639 | } | 663 | } |
640 | EXPORT_SYMBOL(phy_attach_direct); | 664 | EXPORT_SYMBOL(phy_attach_direct); |
641 | 665 | ||
@@ -677,14 +701,15 @@ EXPORT_SYMBOL(phy_attach); | |||
677 | /** | 701 | /** |
678 | * phy_detach - detach a PHY device from its network device | 702 | * phy_detach - detach a PHY device from its network device |
679 | * @phydev: target phy_device struct | 703 | * @phydev: target phy_device struct |
704 | * | ||
705 | * This detaches the phy device from its network device and the phy | ||
706 | * driver, and drops the reference count taken in phy_attach_direct(). | ||
680 | */ | 707 | */ |
681 | void phy_detach(struct phy_device *phydev) | 708 | void phy_detach(struct phy_device *phydev) |
682 | { | 709 | { |
710 | struct mii_bus *bus; | ||
683 | int i; | 711 | int i; |
684 | 712 | ||
685 | if (phydev->bus->dev.driver) | ||
686 | module_put(phydev->bus->dev.driver->owner); | ||
687 | |||
688 | phydev->attached_dev->phydev = NULL; | 713 | phydev->attached_dev->phydev = NULL; |
689 | phydev->attached_dev = NULL; | 714 | phydev->attached_dev = NULL; |
690 | phy_suspend(phydev); | 715 | phy_suspend(phydev); |
@@ -700,6 +725,15 @@ void phy_detach(struct phy_device *phydev) | |||
700 | break; | 725 | break; |
701 | } | 726 | } |
702 | } | 727 | } |
728 | |||
729 | /* | ||
730 | * The phydev might go away on the put_device() below, so avoid | ||
731 | * a use-after-free bug by reading the underlying bus first. | ||
732 | */ | ||
733 | bus = phydev->bus; | ||
734 | |||
735 | put_device(&phydev->dev); | ||
736 | module_put(bus->owner); | ||
703 | } | 737 | } |
704 | EXPORT_SYMBOL(phy_detach); | 738 | EXPORT_SYMBOL(phy_detach); |
705 | 739 | ||
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 17cad185169d..76cad712ddb2 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c | |||
@@ -66,7 +66,6 @@ | |||
66 | #define PHY_ID_VSC8244 0x000fc6c0 | 66 | #define PHY_ID_VSC8244 0x000fc6c0 |
67 | #define PHY_ID_VSC8514 0x00070670 | 67 | #define PHY_ID_VSC8514 0x00070670 |
68 | #define PHY_ID_VSC8574 0x000704a0 | 68 | #define PHY_ID_VSC8574 0x000704a0 |
69 | #define PHY_ID_VSC8641 0x00070431 | ||
70 | #define PHY_ID_VSC8662 0x00070660 | 69 | #define PHY_ID_VSC8662 0x00070660 |
71 | #define PHY_ID_VSC8221 0x000fc550 | 70 | #define PHY_ID_VSC8221 0x000fc550 |
72 | #define PHY_ID_VSC8211 0x000fc4b0 | 71 | #define PHY_ID_VSC8211 0x000fc4b0 |
@@ -273,18 +272,6 @@ static struct phy_driver vsc82xx_driver[] = { | |||
273 | .config_intr = &vsc82xx_config_intr, | 272 | .config_intr = &vsc82xx_config_intr, |
274 | .driver = { .owner = THIS_MODULE,}, | 273 | .driver = { .owner = THIS_MODULE,}, |
275 | }, { | 274 | }, { |
276 | .phy_id = PHY_ID_VSC8641, | ||
277 | .name = "Vitesse VSC8641", | ||
278 | .phy_id_mask = 0x000ffff0, | ||
279 | .features = PHY_GBIT_FEATURES, | ||
280 | .flags = PHY_HAS_INTERRUPT, | ||
281 | .config_init = &vsc824x_config_init, | ||
282 | .config_aneg = &vsc82x4_config_aneg, | ||
283 | .read_status = &genphy_read_status, | ||
284 | .ack_interrupt = &vsc824x_ack_interrupt, | ||
285 | .config_intr = &vsc82xx_config_intr, | ||
286 | .driver = { .owner = THIS_MODULE,}, | ||
287 | }, { | ||
288 | .phy_id = PHY_ID_VSC8662, | 275 | .phy_id = PHY_ID_VSC8662, |
289 | .name = "Vitesse VSC8662", | 276 | .name = "Vitesse VSC8662", |
290 | .phy_id_mask = 0x000ffff0, | 277 | .phy_id_mask = 0x000ffff0, |
@@ -331,7 +318,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = { | |||
331 | { PHY_ID_VSC8244, 0x000fffc0 }, | 318 | { PHY_ID_VSC8244, 0x000fffc0 }, |
332 | { PHY_ID_VSC8514, 0x000ffff0 }, | 319 | { PHY_ID_VSC8514, 0x000ffff0 }, |
333 | { PHY_ID_VSC8574, 0x000ffff0 }, | 320 | { PHY_ID_VSC8574, 0x000ffff0 }, |
334 | { PHY_ID_VSC8641, 0x000ffff0 }, | ||
335 | { PHY_ID_VSC8662, 0x000ffff0 }, | 321 | { PHY_ID_VSC8662, 0x000ffff0 }, |
336 | { PHY_ID_VSC8221, 0x000ffff0 }, | 322 | { PHY_ID_VSC8221, 0x000ffff0 }, |
337 | { PHY_ID_VSC8211, 0x000ffff0 }, | 323 | { PHY_ID_VSC8211, 0x000ffff0 }, |
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 0481daf9201a..ed00446759b2 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
@@ -2755,6 +2755,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, | |||
2755 | */ | 2755 | */ |
2756 | dev_net_set(dev, net); | 2756 | dev_net_set(dev, net); |
2757 | 2757 | ||
2758 | rtnl_lock(); | ||
2758 | mutex_lock(&pn->all_ppp_mutex); | 2759 | mutex_lock(&pn->all_ppp_mutex); |
2759 | 2760 | ||
2760 | if (unit < 0) { | 2761 | if (unit < 0) { |
@@ -2785,7 +2786,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, | |||
2785 | ppp->file.index = unit; | 2786 | ppp->file.index = unit; |
2786 | sprintf(dev->name, "ppp%d", unit); | 2787 | sprintf(dev->name, "ppp%d", unit); |
2787 | 2788 | ||
2788 | ret = register_netdev(dev); | 2789 | ret = register_netdevice(dev); |
2789 | if (ret != 0) { | 2790 | if (ret != 0) { |
2790 | unit_put(&pn->units_idr, unit); | 2791 | unit_put(&pn->units_idr, unit); |
2791 | netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", | 2792 | netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", |
@@ -2797,6 +2798,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, | |||
2797 | 2798 | ||
2798 | atomic_inc(&ppp_unit_count); | 2799 | atomic_inc(&ppp_unit_count); |
2799 | mutex_unlock(&pn->all_ppp_mutex); | 2800 | mutex_unlock(&pn->all_ppp_mutex); |
2801 | rtnl_unlock(); | ||
2800 | 2802 | ||
2801 | *retp = 0; | 2803 | *retp = 0; |
2802 | return ppp; | 2804 | return ppp; |
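
The ppp_generic.c change switches to register_netdevice(), which requires the caller to already hold the RTNL; the patch therefore wraps the whole unit setup in rtnl_lock()/rtnl_unlock() so the RTNL is taken before all_ppp_mutex. The minimal rule being relied on, in sketch form:

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	static int example_register(struct net_device *dev)
	{
		int err;

		rtnl_lock();			/* register_netdevice() must run under RTNL */
		err = register_netdevice(dev);
		rtnl_unlock();

		return err;			/* behaviourally equivalent to register_netdev(dev) */
	}
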
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 1610b79ae386..fbb9325d1f6e 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -583,4 +583,15 @@ config USB_VL600 | |||
583 | 583 | ||
584 | http://ubuntuforums.org/showpost.php?p=10589647&postcount=17 | 584 | http://ubuntuforums.org/showpost.php?p=10589647&postcount=17 |
585 | 585 | ||
586 | config USB_NET_CH9200 | ||
587 | tristate "QinHeng CH9200 USB ethernet support" | ||
588 | depends on USB_USBNET | ||
589 | select MII | ||
590 | help | ||
591 | Choose this option if you have a USB ethernet adapter with a QinHeng | ||
592 | CH9200 chipset. | ||
593 | |||
594 | To compile this driver as a module, choose M here: the | ||
595 | module will be called ch9200. | ||
596 | |||
586 | endif # USB_NET_DRIVERS | 597 | endif # USB_NET_DRIVERS |
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index cf6a0e610a7f..b5f04068dbe4 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile | |||
@@ -38,4 +38,4 @@ obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM) += huawei_cdc_ncm.o | |||
38 | obj-$(CONFIG_USB_VL600) += lg-vl600.o | 38 | obj-$(CONFIG_USB_VL600) += lg-vl600.o |
39 | obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o | 39 | obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o |
40 | obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o | 40 | obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o |
41 | 41 | obj-$(CONFIG_USB_NET_CH9200) += ch9200.o | |
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c new file mode 100644 index 000000000000..5e151e6a3e09 --- /dev/null +++ b/drivers/net/usb/ch9200.c | |||
@@ -0,0 +1,432 @@ | |||
1 | /* | ||
2 | * USB 10M/100M ethernet adapter | ||
3 | * | ||
4 | * This file is licensed under the terms of the GNU General Public License | ||
5 | * version 2. This program is licensed "as is" without any warranty of any | ||
6 | * kind, whether express or implied | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/stddef.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/netdevice.h> | ||
16 | #include <linux/etherdevice.h> | ||
17 | #include <linux/ethtool.h> | ||
18 | #include <linux/mii.h> | ||
19 | #include <linux/usb.h> | ||
20 | #include <linux/crc32.h> | ||
21 | #include <linux/usb/usbnet.h> | ||
22 | #include <linux/slab.h> | ||
23 | |||
24 | #define CH9200_VID 0x1A86 | ||
25 | #define CH9200_PID_E092 0xE092 | ||
26 | |||
27 | #define CTRL_TIMEOUT_MS 1000 | ||
28 | |||
29 | #define CONTROL_TIMEOUT_MS 1000 | ||
30 | |||
31 | #define REQUEST_READ 0x0E | ||
32 | #define REQUEST_WRITE 0x0F | ||
33 | |||
34 | /* Address space: | ||
35 | * 00-63 : MII | ||
36 | * 64-128: MAC | ||
37 | * | ||
38 | * Note: all accesses must be 16-bit | ||
39 | */ | ||
40 | |||
41 | #define MAC_REG_CTRL 64 | ||
42 | #define MAC_REG_STATUS 66 | ||
43 | #define MAC_REG_INTERRUPT_MASK 68 | ||
44 | #define MAC_REG_PHY_COMMAND 70 | ||
45 | #define MAC_REG_PHY_DATA 72 | ||
46 | #define MAC_REG_STATION_L 74 | ||
47 | #define MAC_REG_STATION_M 76 | ||
48 | #define MAC_REG_STATION_H 78 | ||
49 | #define MAC_REG_HASH_L 80 | ||
50 | #define MAC_REG_HASH_M1 82 | ||
51 | #define MAC_REG_HASH_M2 84 | ||
52 | #define MAC_REG_HASH_H 86 | ||
53 | #define MAC_REG_THRESHOLD 88 | ||
54 | #define MAC_REG_FIFO_DEPTH 90 | ||
55 | #define MAC_REG_PAUSE 92 | ||
56 | #define MAC_REG_FLOW_CONTROL 94 | ||
57 | |||
58 | /* Control register bits | ||
59 | * | ||
60 | * Note: bits 13 and 15 are reserved | ||
61 | */ | ||
62 | #define LOOPBACK (0x01 << 14) | ||
63 | #define BASE100X (0x01 << 12) | ||
64 | #define MBPS_10 (0x01 << 11) | ||
65 | #define DUPLEX_MODE (0x01 << 10) | ||
66 | #define PAUSE_FRAME (0x01 << 9) | ||
67 | #define PROMISCUOUS (0x01 << 8) | ||
68 | #define MULTICAST (0x01 << 7) | ||
69 | #define BROADCAST (0x01 << 6) | ||
70 | #define HASH (0x01 << 5) | ||
71 | #define APPEND_PAD (0x01 << 4) | ||
72 | #define APPEND_CRC (0x01 << 3) | ||
73 | #define TRANSMITTER_ACTION (0x01 << 2) | ||
74 | #define RECEIVER_ACTION (0x01 << 1) | ||
75 | #define DMA_ACTION (0x01 << 0) | ||
76 | |||
77 | /* Status register bits | ||
78 | * | ||
79 | * Note: bits 7-15 are reserved | ||
80 | */ | ||
81 | #define ALIGNMENT (0x01 << 6) | ||
82 | #define FIFO_OVER_RUN (0x01 << 5) | ||
83 | #define FIFO_UNDER_RUN (0x01 << 4) | ||
84 | #define RX_ERROR (0x01 << 3) | ||
85 | #define RX_COMPLETE (0x01 << 2) | ||
86 | #define TX_ERROR (0x01 << 1) | ||
87 | #define TX_COMPLETE (0x01 << 0) | ||
88 | |||
89 | /* FIFO depth register bits | ||
90 | * | ||
91 | * Note: bits 6 and 14 are reserved | ||
92 | */ | ||
93 | |||
94 | #define ETH_TXBD (0x01 << 15) | ||
95 | #define ETN_TX_FIFO_DEPTH (0x01 << 8) | ||
96 | #define ETH_RXBD (0x01 << 7) | ||
97 | #define ETH_RX_FIFO_DEPTH (0x01 << 0) | ||
98 | |||
99 | static int control_read(struct usbnet *dev, | ||
100 | unsigned char request, unsigned short value, | ||
101 | unsigned short index, void *data, unsigned short size, | ||
102 | int timeout) | ||
103 | { | ||
104 | unsigned char *buf = NULL; | ||
105 | unsigned char request_type; | ||
106 | int err = 0; | ||
107 | |||
108 | if (request == REQUEST_READ) | ||
109 | request_type = (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER); | ||
110 | else | ||
111 | request_type = (USB_DIR_IN | USB_TYPE_VENDOR | | ||
112 | USB_RECIP_DEVICE); | ||
113 | |||
114 | netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n", | ||
115 | index, size); | ||
116 | |||
117 | buf = kmalloc(size, GFP_KERNEL); | ||
118 | if (!buf) { | ||
119 | err = -ENOMEM; | ||
120 | goto err_out; | ||
121 | } | ||
122 | |||
123 | err = usb_control_msg(dev->udev, | ||
124 | usb_rcvctrlpipe(dev->udev, 0), | ||
125 | request, request_type, value, index, buf, size, | ||
126 | timeout); | ||
127 | if (err == size) | ||
128 | memcpy(data, buf, size); | ||
129 | else if (err >= 0) | ||
130 | err = -EINVAL; | ||
131 | kfree(buf); | ||
132 | |||
133 | return err; | ||
134 | |||
135 | err_out: | ||
136 | return err; | ||
137 | } | ||
138 | |||
139 | static int control_write(struct usbnet *dev, unsigned char request, | ||
140 | unsigned short value, unsigned short index, | ||
141 | void *data, unsigned short size, int timeout) | ||
142 | { | ||
143 | unsigned char *buf = NULL; | ||
144 | unsigned char request_type; | ||
145 | int err = 0; | ||
146 | |||
147 | if (request == REQUEST_WRITE) | ||
148 | request_type = (USB_DIR_OUT | USB_TYPE_VENDOR | | ||
149 | USB_RECIP_OTHER); | ||
150 | else | ||
151 | request_type = (USB_DIR_OUT | USB_TYPE_VENDOR | | ||
152 | USB_RECIP_DEVICE); | ||
153 | |||
154 | netdev_dbg(dev->net, "Control_write() index=0x%02x size=%d\n", | ||
155 | index, size); | ||
156 | |||
157 | if (data) { | ||
158 | buf = kmalloc(size, GFP_KERNEL); | ||
159 | if (!buf) { | ||
160 | err = -ENOMEM; | ||
161 | goto err_out; | ||
162 | } | ||
163 | memcpy(buf, data, size); | ||
164 | } | ||
165 | |||
166 | err = usb_control_msg(dev->udev, | ||
167 | usb_sndctrlpipe(dev->udev, 0), | ||
168 | request, request_type, value, index, buf, size, | ||
169 | timeout); | ||
170 | if (err >= 0 && err < size) | ||
171 | err = -EINVAL; | ||
172 | kfree(buf); | ||
173 | |||
174 | return err; | ||
175 | |||
176 | err_out: | ||
177 | return err; | ||
178 | } | ||
179 | |||
180 | static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc) | ||
181 | { | ||
182 | struct usbnet *dev = netdev_priv(netdev); | ||
183 | unsigned char buff[2]; | ||
184 | |||
185 | netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n", | ||
186 | phy_id, loc); | ||
187 | |||
188 | if (phy_id != 0) | ||
189 | return -ENODEV; | ||
190 | |||
191 | control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02, | ||
192 | CONTROL_TIMEOUT_MS); | ||
193 | |||
194 | return (buff[0] | buff[1] << 8); | ||
195 | } | ||
196 | |||
197 | static void ch9200_mdio_write(struct net_device *netdev, | ||
198 | int phy_id, int loc, int val) | ||
199 | { | ||
200 | struct usbnet *dev = netdev_priv(netdev); | ||
201 | unsigned char buff[2]; | ||
202 | |||
203 | netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n", | ||
204 | phy_id, loc); | ||
205 | |||
206 | if (phy_id != 0) | ||
207 | return; | ||
208 | |||
209 | buff[0] = (unsigned char)val; | ||
210 | buff[1] = (unsigned char)(val >> 8); | ||
211 | |||
212 | control_write(dev, REQUEST_WRITE, 0, loc * 2, buff, 0x02, | ||
213 | CONTROL_TIMEOUT_MS); | ||
214 | } | ||
215 | |||
216 | static int ch9200_link_reset(struct usbnet *dev) | ||
217 | { | ||
218 | struct ethtool_cmd ecmd; | ||
219 | |||
220 | mii_check_media(&dev->mii, 1, 1); | ||
221 | mii_ethtool_gset(&dev->mii, &ecmd); | ||
222 | |||
223 | netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n", | ||
224 | ecmd.speed, ecmd.duplex); | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static void ch9200_status(struct usbnet *dev, struct urb *urb) | ||
230 | { | ||
231 | int link; | ||
232 | unsigned char *buf; | ||
233 | |||
234 | if (urb->actual_length < 16) | ||
235 | return; | ||
236 | |||
237 | buf = urb->transfer_buffer; | ||
238 | link = !!(buf[0] & 0x01); | ||
239 | |||
240 | if (link) { | ||
241 | netif_carrier_on(dev->net); | ||
242 | usbnet_defer_kevent(dev, EVENT_LINK_RESET); | ||
243 | } else { | ||
244 | netif_carrier_off(dev->net); | ||
245 | } | ||
246 | } | ||
247 | |||
248 | static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | ||
249 | gfp_t flags) | ||
250 | { | ||
251 | int i = 0; | ||
252 | int len = 0; | ||
253 | int tx_overhead = 0; | ||
254 | |||
255 | tx_overhead = 0x40; | ||
256 | |||
257 | len = skb->len; | ||
258 | if (skb_headroom(skb) < tx_overhead) { | ||
259 | struct sk_buff *skb2; | ||
260 | |||
261 | skb2 = skb_copy_expand(skb, tx_overhead, 0, flags); | ||
262 | dev_kfree_skb_any(skb); | ||
263 | skb = skb2; | ||
264 | if (!skb) | ||
265 | return NULL; | ||
266 | } | ||
267 | |||
268 | __skb_push(skb, tx_overhead); | ||
269 | /* usbnet adds padding if length is a multiple of packet size | ||
270 | * if so, adjust length value in header | ||
271 | */ | ||
272 | if ((skb->len % dev->maxpacket) == 0) | ||
273 | len++; | ||
274 | |||
275 | skb->data[0] = len; | ||
276 | skb->data[1] = len >> 8; | ||
277 | skb->data[2] = 0x00; | ||
278 | skb->data[3] = 0x80; | ||
279 | |||
280 | for (i = 4; i < 48; i++) | ||
281 | skb->data[i] = 0x00; | ||
282 | |||
283 | skb->data[48] = len; | ||
284 | skb->data[49] = len >> 8; | ||
285 | skb->data[50] = 0x00; | ||
286 | skb->data[51] = 0x80; | ||
287 | |||
288 | for (i = 52; i < 64; i++) | ||
289 | skb->data[i] = 0x00; | ||
290 | |||
291 | return skb; | ||
292 | } | ||
293 | |||
294 | static int ch9200_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | ||
295 | { | ||
296 | int len = 0; | ||
297 | int rx_overhead = 0; | ||
298 | |||
299 | rx_overhead = 64; | ||
300 | |||
301 | if (unlikely(skb->len < rx_overhead)) { | ||
302 | dev_err(&dev->udev->dev, "unexpected tiny rx frame\n"); | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | len = (skb->data[skb->len - 16] | skb->data[skb->len - 15] << 8); | ||
307 | skb_trim(skb, len); | ||
308 | |||
309 | return 1; | ||
310 | } | ||
311 | |||
312 | static int get_mac_address(struct usbnet *dev, unsigned char *data) | ||
313 | { | ||
314 | int err = 0; | ||
315 | unsigned char mac_addr[0x06]; | ||
316 | int rd_mac_len = 0; | ||
317 | |||
318 | netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n", | ||
319 | dev->udev->descriptor.idVendor, | ||
320 | dev->udev->descriptor.idProduct); | ||
321 | |||
322 | memset(mac_addr, 0, sizeof(mac_addr)); | ||
323 | rd_mac_len = control_read(dev, REQUEST_READ, 0, | ||
324 | MAC_REG_STATION_L, mac_addr, 0x02, | ||
325 | CONTROL_TIMEOUT_MS); | ||
326 | rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_M, | ||
327 | mac_addr + 2, 0x02, CONTROL_TIMEOUT_MS); | ||
328 | rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_H, | ||
329 | mac_addr + 4, 0x02, CONTROL_TIMEOUT_MS); | ||
330 | if (rd_mac_len != ETH_ALEN) | ||
331 | err = -EINVAL; | ||
332 | |||
333 | data[0] = mac_addr[5]; | ||
334 | data[1] = mac_addr[4]; | ||
335 | data[2] = mac_addr[3]; | ||
336 | data[3] = mac_addr[2]; | ||
337 | data[4] = mac_addr[1]; | ||
338 | data[5] = mac_addr[0]; | ||
339 | |||
340 | return err; | ||
341 | } | ||
342 | |||
343 | static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf) | ||
344 | { | ||
345 | int retval = 0; | ||
346 | unsigned char data[2]; | ||
347 | |||
348 | retval = usbnet_get_endpoints(dev, intf); | ||
349 | if (retval) | ||
350 | return retval; | ||
351 | |||
352 | dev->mii.dev = dev->net; | ||
353 | dev->mii.mdio_read = ch9200_mdio_read; | ||
354 | dev->mii.mdio_write = ch9200_mdio_write; | ||
355 | dev->mii.reg_num_mask = 0x1f; | ||
356 | |||
357 | dev->mii.phy_id_mask = 0x1f; | ||
358 | |||
359 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | ||
360 | dev->rx_urb_size = 24 * 64 + 16; | ||
361 | mii_nway_restart(&dev->mii); | ||
362 | |||
363 | data[0] = 0x01; | ||
364 | data[1] = 0x0F; | ||
365 | retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_THRESHOLD, data, | ||
366 | 0x02, CONTROL_TIMEOUT_MS); | ||
367 | |||
368 | data[0] = 0xA0; | ||
369 | data[1] = 0x90; | ||
370 | retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FIFO_DEPTH, data, | ||
371 | 0x02, CONTROL_TIMEOUT_MS); | ||
372 | |||
373 | data[0] = 0x30; | ||
374 | data[1] = 0x00; | ||
375 | retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_PAUSE, data, | ||
376 | 0x02, CONTROL_TIMEOUT_MS); | ||
377 | |||
378 | data[0] = 0x17; | ||
379 | data[1] = 0xD8; | ||
380 | retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FLOW_CONTROL, | ||
381 | data, 0x02, CONTROL_TIMEOUT_MS); | ||
382 | |||
383 | /* Undocumented register */ | ||
384 | data[0] = 0x01; | ||
385 | data[1] = 0x00; | ||
386 | retval = control_write(dev, REQUEST_WRITE, 0, 254, data, 0x02, | ||
387 | CONTROL_TIMEOUT_MS); | ||
388 | |||
389 | data[0] = 0x5F; | ||
390 | data[1] = 0x0D; | ||
391 | retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02, | ||
392 | CONTROL_TIMEOUT_MS); | ||
393 | |||
394 | retval = get_mac_address(dev, dev->net->dev_addr); | ||
395 | |||
396 | return retval; | ||
397 | } | ||
398 | |||
399 | static const struct driver_info ch9200_info = { | ||
400 | .description = "CH9200 USB to Network Adaptor", | ||
401 | .flags = FLAG_ETHER, | ||
402 | .bind = ch9200_bind, | ||
403 | .rx_fixup = ch9200_rx_fixup, | ||
404 | .tx_fixup = ch9200_tx_fixup, | ||
405 | .status = ch9200_status, | ||
406 | .link_reset = ch9200_link_reset, | ||
407 | .reset = ch9200_link_reset, | ||
408 | }; | ||
409 | |||
410 | static const struct usb_device_id ch9200_products[] = { | ||
411 | { | ||
412 | USB_DEVICE(0x1A86, 0xE092), | ||
413 | .driver_info = (unsigned long)&ch9200_info, | ||
414 | }, | ||
415 | {}, | ||
416 | }; | ||
417 | |||
418 | MODULE_DEVICE_TABLE(usb, ch9200_products); | ||
419 | |||
420 | static struct usb_driver ch9200_driver = { | ||
421 | .name = "ch9200", | ||
422 | .id_table = ch9200_products, | ||
423 | .probe = usbnet_probe, | ||
424 | .disconnect = usbnet_disconnect, | ||
425 | .suspend = usbnet_suspend, | ||
426 | .resume = usbnet_resume, | ||
427 | }; | ||
428 | |||
429 | module_usb_driver(ch9200_driver); | ||
430 | |||
431 | MODULE_DESCRIPTION("QinHeng CH9200 USB Network device"); | ||
432 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index e7094fbd7568..488c6f50df73 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
@@ -193,7 +193,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, | |||
193 | .flowi4_oif = vrf_dev->ifindex, | 193 | .flowi4_oif = vrf_dev->ifindex, |
194 | .flowi4_iif = LOOPBACK_IFINDEX, | 194 | .flowi4_iif = LOOPBACK_IFINDEX, |
195 | .flowi4_tos = RT_TOS(ip4h->tos), | 195 | .flowi4_tos = RT_TOS(ip4h->tos), |
196 | .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC, | 196 | .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC | |
197 | FLOWI_FLAG_SKIP_NH_OIF, | ||
197 | .daddr = ip4h->daddr, | 198 | .daddr = ip4h->daddr, |
198 | }; | 199 | }; |
199 | 200 | ||
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index cf8b7f0473b3..bbac1d35ed4e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -2392,10 +2392,6 @@ static void vxlan_setup(struct net_device *dev) | |||
2392 | 2392 | ||
2393 | eth_hw_addr_random(dev); | 2393 | eth_hw_addr_random(dev); |
2394 | ether_setup(dev); | 2394 | ether_setup(dev); |
2395 | if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6) | ||
2396 | dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM; | ||
2397 | else | ||
2398 | dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM; | ||
2399 | 2395 | ||
2400 | dev->netdev_ops = &vxlan_netdev_ops; | 2396 | dev->netdev_ops = &vxlan_netdev_ops; |
2401 | dev->destructor = free_netdev; | 2397 | dev->destructor = free_netdev; |
@@ -2640,8 +2636,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, | |||
2640 | dst->remote_ip.sa.sa_family = AF_INET; | 2636 | dst->remote_ip.sa.sa_family = AF_INET; |
2641 | 2637 | ||
2642 | if (dst->remote_ip.sa.sa_family == AF_INET6 || | 2638 | if (dst->remote_ip.sa.sa_family == AF_INET6 || |
2643 | vxlan->cfg.saddr.sa.sa_family == AF_INET6) | 2639 | vxlan->cfg.saddr.sa.sa_family == AF_INET6) { |
2640 | if (!IS_ENABLED(CONFIG_IPV6)) | ||
2641 | return -EPFNOSUPPORT; | ||
2644 | use_ipv6 = true; | 2642 | use_ipv6 = true; |
2643 | } | ||
2645 | 2644 | ||
2646 | if (conf->remote_ifindex) { | 2645 | if (conf->remote_ifindex) { |
2647 | struct net_device *lowerdev | 2646 | struct net_device *lowerdev |
@@ -2670,8 +2669,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, | |||
2670 | 2669 | ||
2671 | dev->needed_headroom = lowerdev->hard_header_len + | 2670 | dev->needed_headroom = lowerdev->hard_header_len + |
2672 | (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); | 2671 | (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); |
2673 | } else if (use_ipv6) | 2672 | } else if (use_ipv6) { |
2674 | vxlan->flags |= VXLAN_F_IPV6; | 2673 | vxlan->flags |= VXLAN_F_IPV6; |
2674 | dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM; | ||
2675 | } else { | ||
2676 | dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM; | ||
2677 | } | ||
2675 | 2678 | ||
2676 | memcpy(&vxlan->cfg, conf, sizeof(*conf)); | 2679 | memcpy(&vxlan->cfg, conf, sizeof(*conf)); |
2677 | if (!vxlan->cfg.dst_port) | 2680 | if (!vxlan->cfg.dst_port) |
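
The vxlan hunks move the needed_headroom calculation into vxlan_dev_configure(), where the address family is actually known, and reject IPv6 configuration there on kernels built without IPv6. IS_ENABLED() keeps that check a compile-time constant while leaving both branches compilable; a small illustrative sketch of the same pattern:

	#include <linux/errno.h>
	#include <linux/kconfig.h>
	#include <linux/socket.h>

	static int example_check_family(sa_family_t family)
	{
		if (family == AF_INET6 && !IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;	/* kernel built without IPv6 support */

		return 0;
	}
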
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 59ad54a63d9f..cb477518dd0e 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c | |||
@@ -128,13 +128,13 @@ static ssize_t namespace_store(struct device *dev, | |||
128 | struct nd_btt *nd_btt = to_nd_btt(dev); | 128 | struct nd_btt *nd_btt = to_nd_btt(dev); |
129 | ssize_t rc; | 129 | ssize_t rc; |
130 | 130 | ||
131 | nvdimm_bus_lock(dev); | ||
132 | device_lock(dev); | 131 | device_lock(dev); |
132 | nvdimm_bus_lock(dev); | ||
133 | rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); | 133 | rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); |
134 | dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, | 134 | dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, |
135 | rc, buf, buf[len - 1] == '\n' ? "" : "\n"); | 135 | rc, buf, buf[len - 1] == '\n' ? "" : "\n"); |
136 | device_unlock(dev); | ||
137 | nvdimm_bus_unlock(dev); | 136 | nvdimm_bus_unlock(dev); |
137 | device_unlock(dev); | ||
138 | 138 | ||
139 | return rc; | 139 | return rc; |
140 | } | 140 | } |
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 3fd7d0d81a47..71805a1aa0f3 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c | |||
@@ -148,13 +148,13 @@ static ssize_t namespace_store(struct device *dev, | |||
148 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); | 148 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); |
149 | ssize_t rc; | 149 | ssize_t rc; |
150 | 150 | ||
151 | nvdimm_bus_lock(dev); | ||
152 | device_lock(dev); | 151 | device_lock(dev); |
152 | nvdimm_bus_lock(dev); | ||
153 | rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); | 153 | rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); |
154 | dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, | 154 | dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, |
155 | rc, buf, buf[len - 1] == '\n' ? "" : "\n"); | 155 | rc, buf, buf[len - 1] == '\n' ? "" : "\n"); |
156 | device_unlock(dev); | ||
157 | nvdimm_bus_unlock(dev); | 156 | nvdimm_bus_unlock(dev); |
157 | device_unlock(dev); | ||
158 | 158 | ||
159 | return rc; | 159 | return rc; |
160 | } | 160 | } |
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index b9525385c0dc..0ba6a978f227 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
@@ -92,6 +92,8 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, | |||
92 | struct pmem_device *pmem = bdev->bd_disk->private_data; | 92 | struct pmem_device *pmem = bdev->bd_disk->private_data; |
93 | 93 | ||
94 | pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector); | 94 | pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector); |
95 | if (rw & WRITE) | ||
96 | wmb_pmem(); | ||
95 | page_endio(page, rw & WRITE, 0); | 97 | page_endio(page, rw & WRITE, 0); |
96 | 98 | ||
97 | return 0; | 99 | return 0; |
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index d3c6676b3c0c..6fd4e5a5ef4a 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c | |||
@@ -67,7 +67,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, | |||
67 | int rc; | 67 | int rc; |
68 | 68 | ||
69 | /* Stop the user from reading */ | 69 | /* Stop the user from reading */ |
70 | if (pos > nvmem->size) | 70 | if (pos >= nvmem->size) |
71 | return 0; | 71 | return 0; |
72 | 72 | ||
73 | if (pos + count > nvmem->size) | 73 | if (pos + count > nvmem->size) |
@@ -92,7 +92,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, | |||
92 | int rc; | 92 | int rc; |
93 | 93 | ||
94 | /* Stop the user from writing */ | 94 | /* Stop the user from writing */ |
95 | if (pos > nvmem->size) | 95 | if (pos >= nvmem->size) |
96 | return 0; | 96 | return 0; |
97 | 97 | ||
98 | if (pos + count > nvmem->size) | 98 | if (pos + count > nvmem->size) |
@@ -825,7 +825,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem, | |||
825 | return rc; | 825 | return rc; |
826 | 826 | ||
827 | /* shift bits in-place */ | 827 | /* shift bits in-place */ |
828 | if (cell->bit_offset || cell->bit_offset) | 828 | if (cell->bit_offset || cell->nbits) |
829 | nvmem_shift_read_buffer_in_place(cell, buf); | 829 | nvmem_shift_read_buffer_in_place(cell, buf); |
830 | 830 | ||
831 | *len = cell->bytes; | 831 | *len = cell->bytes; |
@@ -938,7 +938,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) | |||
938 | rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); | 938 | rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); |
939 | 939 | ||
940 | /* free the tmp buffer */ | 940 | /* free the tmp buffer */ |
941 | if (cell->bit_offset) | 941 | if (cell->bit_offset || cell->nbits) |
942 | kfree(buf); | 942 | kfree(buf); |
943 | 943 | ||
944 | if (IS_ERR_VALUE(rc)) | 944 | if (IS_ERR_VALUE(rc)) |
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index 14777dd5212d..cfa3b85064dd 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c | |||
@@ -103,7 +103,7 @@ static int sunxi_sid_probe(struct platform_device *pdev) | |||
103 | struct nvmem_device *nvmem; | 103 | struct nvmem_device *nvmem; |
104 | struct regmap *regmap; | 104 | struct regmap *regmap; |
105 | struct sunxi_sid *sid; | 105 | struct sunxi_sid *sid; |
106 | int i, size; | 106 | int ret, i, size; |
107 | char *randomness; | 107 | char *randomness; |
108 | 108 | ||
109 | sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); | 109 | sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); |
@@ -131,6 +131,11 @@ static int sunxi_sid_probe(struct platform_device *pdev) | |||
131 | return PTR_ERR(nvmem); | 131 | return PTR_ERR(nvmem); |
132 | 132 | ||
133 | randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); | 133 | randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); |
134 | if (!randomness) { | ||
135 | ret = -ENOMEM; ||
136 | goto err_unreg_nvmem; | ||
137 | } | ||
138 | |||
134 | for (i = 0; i < size; i++) | 139 | for (i = 0; i < size; i++) |
135 | randomness[i] = sunxi_sid_read_byte(sid, i); | 140 | randomness[i] = sunxi_sid_read_byte(sid, i); |
136 | 141 | ||
@@ -140,6 +145,10 @@ static int sunxi_sid_probe(struct platform_device *pdev) | |||
140 | platform_set_drvdata(pdev, nvmem); | 145 | platform_set_drvdata(pdev, nvmem); |
141 | 146 | ||
142 | return 0; | 147 | return 0; |
148 | |||
149 | err_unreg_nvmem: | ||
150 | nvmem_unregister(nvmem); | ||
151 | return ret; | ||
143 | } | 152 | } |
144 | 153 | ||
145 | static int sunxi_sid_remove(struct platform_device *pdev) | 154 | static int sunxi_sid_remove(struct platform_device *pdev) |
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 1350fa25cdb0..a87a868fed64 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c | |||
@@ -197,7 +197,8 @@ static int of_phy_match(struct device *dev, void *phy_np) | |||
197 | * of_phy_find_device - Given a PHY node, find the phy_device | 197 | * of_phy_find_device - Given a PHY node, find the phy_device |
198 | * @phy_np: Pointer to the phy's device tree node | 198 | * @phy_np: Pointer to the phy's device tree node |
199 | * | 199 | * |
200 | * Returns a pointer to the phy_device. | 200 | * If successful, returns a pointer to the phy_device with the embedded |
201 | * struct device refcount incremented by one, or NULL on failure. | ||
201 | */ | 202 | */ |
202 | struct phy_device *of_phy_find_device(struct device_node *phy_np) | 203 | struct phy_device *of_phy_find_device(struct device_node *phy_np) |
203 | { | 204 | { |
@@ -217,7 +218,9 @@ EXPORT_SYMBOL(of_phy_find_device); | |||
217 | * @hndlr: Link state callback for the network device | 218 | * @hndlr: Link state callback for the network device |
218 | * @iface: PHY data interface type | 219 | * @iface: PHY data interface type |
219 | * | 220 | * |
220 | * Returns a pointer to the phy_device if successful. NULL otherwise | 221 | * If successful, returns a pointer to the phy_device with the embedded |
222 | * struct device refcount incremented by one, or NULL on failure. The | ||
223 | * refcount must be dropped by calling phy_disconnect() or phy_detach(). | ||
221 | */ | 224 | */ |
222 | struct phy_device *of_phy_connect(struct net_device *dev, | 225 | struct phy_device *of_phy_connect(struct net_device *dev, |
223 | struct device_node *phy_np, | 226 | struct device_node *phy_np, |
@@ -225,13 +228,19 @@ struct phy_device *of_phy_connect(struct net_device *dev, | |||
225 | phy_interface_t iface) | 228 | phy_interface_t iface) |
226 | { | 229 | { |
227 | struct phy_device *phy = of_phy_find_device(phy_np); | 230 | struct phy_device *phy = of_phy_find_device(phy_np); |
231 | int ret; | ||
228 | 232 | ||
229 | if (!phy) | 233 | if (!phy) |
230 | return NULL; | 234 | return NULL; |
231 | 235 | ||
232 | phy->dev_flags = flags; | 236 | phy->dev_flags = flags; |
233 | 237 | ||
234 | return phy_connect_direct(dev, phy, hndlr, iface) ? NULL : phy; | 238 | ret = phy_connect_direct(dev, phy, hndlr, iface); |
239 | |||
240 | /* refcount is held by phy_connect_direct() on success */ | ||
241 | put_device(&phy->dev); | ||
242 | |||
243 | return ret ? NULL : phy; | ||
235 | } | 244 | } |
236 | EXPORT_SYMBOL(of_phy_connect); | 245 | EXPORT_SYMBOL(of_phy_connect); |
237 | 246 | ||
@@ -241,17 +250,27 @@ EXPORT_SYMBOL(of_phy_connect); | |||
241 | * @phy_np: Node pointer for the PHY | 250 | * @phy_np: Node pointer for the PHY |
242 | * @flags: flags to pass to the PHY | 251 | * @flags: flags to pass to the PHY |
243 | * @iface: PHY data interface type | 252 | * @iface: PHY data interface type |
253 | * | ||
254 | * If successful, returns a pointer to the phy_device with the embedded | ||
255 | * struct device refcount incremented by one, or NULL on failure. The | ||
256 | * refcount must be dropped by calling phy_disconnect() or phy_detach(). | ||
244 | */ | 257 | */ |
245 | struct phy_device *of_phy_attach(struct net_device *dev, | 258 | struct phy_device *of_phy_attach(struct net_device *dev, |
246 | struct device_node *phy_np, u32 flags, | 259 | struct device_node *phy_np, u32 flags, |
247 | phy_interface_t iface) | 260 | phy_interface_t iface) |
248 | { | 261 | { |
249 | struct phy_device *phy = of_phy_find_device(phy_np); | 262 | struct phy_device *phy = of_phy_find_device(phy_np); |
263 | int ret; | ||
250 | 264 | ||
251 | if (!phy) | 265 | if (!phy) |
252 | return NULL; | 266 | return NULL; |
253 | 267 | ||
254 | return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy; | 268 | ret = phy_attach_direct(dev, phy, flags, iface); |
269 | |||
270 | /* refcount is held by phy_attach_direct() on success */ | ||
271 | put_device(&phy->dev); | ||
272 | |||
273 | return ret ? NULL : phy; | ||
255 | } | 274 | } |
256 | EXPORT_SYMBOL(of_phy_attach); | 275 | EXPORT_SYMBOL(of_phy_attach); |
257 | 276 | ||
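
The of_mdio.c comments above now spell out that of_phy_connect() and of_phy_attach() return a phy_device whose embedded struct device reference is held on behalf of the caller (the extra lookup reference is dropped internally), and that phy_disconnect() or phy_detach() must drop it again. A hedged usage sketch; the "phy-handle" lookup is a common pattern, not something taken from this patch:

	#include <linux/of.h>
	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	static int example_connect(struct net_device *ndev, struct device_node *np,
				   void (*adjust_link)(struct net_device *))
	{
		struct device_node *phy_np;
		struct phy_device *phydev;

		phy_np = of_parse_phandle(np, "phy-handle", 0);
		if (!phy_np)
			return -ENODEV;

		phydev = of_phy_connect(ndev, phy_np, adjust_link, 0,
					PHY_INTERFACE_MODE_RGMII);
		of_node_put(phy_np);
		if (!phydev)
			return -ENODEV;

		/* ... later, phy_disconnect(phydev) drops the reference taken here ... */
		return 0;
	}
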
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c index 1710d9dc7fc2..2306313c0029 100644 --- a/drivers/of/of_pci_irq.c +++ b/drivers/of/of_pci_irq.c | |||
@@ -38,8 +38,8 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq | |||
38 | */ | 38 | */ |
39 | rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); | 39 | rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); |
40 | if (rc != 0) | 40 | if (rc != 0) |
41 | return rc; | 41 | goto err; |
42 | /* No pin, exit */ | 42 | /* No pin, exit with no error message. */ |
43 | if (pin == 0) | 43 | if (pin == 0) |
44 | return -ENODEV; | 44 | return -ENODEV; |
45 | 45 | ||
@@ -53,8 +53,10 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq | |||
53 | ppnode = pci_bus_to_OF_node(pdev->bus); | 53 | ppnode = pci_bus_to_OF_node(pdev->bus); |
54 | 54 | ||
55 | /* No node for host bridge ? give up */ | 55 | /* No node for host bridge ? give up */ |
56 | if (ppnode == NULL) | 56 | if (ppnode == NULL) { |
57 | return -EINVAL; | 57 | rc = -EINVAL; |
58 | goto err; | ||
59 | } | ||
58 | } else { | 60 | } else { |
59 | /* We found a P2P bridge, check if it has a node */ | 61 | /* We found a P2P bridge, check if it has a node */ |
60 | ppnode = pci_device_to_OF_node(ppdev); | 62 | ppnode = pci_device_to_OF_node(ppdev); |
@@ -86,7 +88,13 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq | |||
86 | out_irq->args[0] = pin; | 88 | out_irq->args[0] = pin; |
87 | laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8)); | 89 | laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8)); |
88 | laddr[1] = laddr[2] = cpu_to_be32(0); | 90 | laddr[1] = laddr[2] = cpu_to_be32(0); |
89 | return of_irq_parse_raw(laddr, out_irq); | 91 | rc = of_irq_parse_raw(laddr, out_irq); |
92 | if (rc) | ||
93 | goto err; | ||
94 | return 0; | ||
95 | err: | ||
96 | dev_err(&pdev->dev, "of_irq_parse_pci() failed with rc=%d\n", rc); | ||
97 | return rc; | ||
90 | } | 98 | } |
91 | EXPORT_SYMBOL_GPL(of_irq_parse_pci); | 99 | EXPORT_SYMBOL_GPL(of_irq_parse_pci); |
92 | 100 | ||
@@ -105,10 +113,8 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) | |||
105 | int ret; | 113 | int ret; |
106 | 114 | ||
107 | ret = of_irq_parse_pci(dev, &oirq); | 115 | ret = of_irq_parse_pci(dev, &oirq); |
108 | if (ret) { | 116 | if (ret) |
109 | dev_err(&dev->dev, "of_irq_parse_pci() failed with rc=%d\n", ret); | ||
110 | return 0; /* Proper return code 0 == NO_IRQ */ | 117 | return 0; /* Proper return code 0 == NO_IRQ */ |
111 | } | ||
112 | 118 | ||
113 | return irq_create_of_mapping(&oirq); | 119 | return irq_create_of_mapping(&oirq); |
114 | } | 120 | } |
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index baec33c4e698..a0580afe1713 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c | |||
@@ -560,6 +560,9 @@ dino_fixup_bus(struct pci_bus *bus) | |||
560 | } else if (bus->parent) { | 560 | } else if (bus->parent) { |
561 | int i; | 561 | int i; |
562 | 562 | ||
563 | pci_read_bridge_bases(bus); | ||
564 | |||
565 | |||
563 | for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { | 566 | for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { |
564 | if((bus->self->resource[i].flags & | 567 | if((bus->self->resource[i].flags & |
565 | (IORESOURCE_IO | IORESOURCE_MEM)) == 0) | 568 | (IORESOURCE_IO | IORESOURCE_MEM)) == 0) |
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 7b9e89ba0465..a32c1f6c252c 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c | |||
@@ -693,6 +693,7 @@ lba_fixup_bus(struct pci_bus *bus) | |||
693 | if (bus->parent) { | 693 | if (bus->parent) { |
694 | int i; | 694 | int i; |
695 | /* PCI-PCI Bridge */ | 695 | /* PCI-PCI Bridge */ |
696 | pci_read_bridge_bases(bus); | ||
696 | for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) | 697 | for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) |
697 | pci_claim_bridge_resource(bus->self, i); | 698 | pci_claim_bridge_resource(bus->self, i); |
698 | } else { | 699 | } else { |
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 769f7e35f1a2..59ac36fe7c42 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c | |||
@@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = { | |||
442 | static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, | 442 | static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, |
443 | void *arg) | 443 | void *arg) |
444 | { | 444 | { |
445 | struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); | 445 | struct pci_dev *tdev = pci_get_slot(dev->bus, |
446 | PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); | ||
446 | ssize_t ret; | 447 | ssize_t ret; |
447 | 448 | ||
448 | if (!tdev) | 449 | if (!tdev) |
@@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, | |||
456 | static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count, | 457 | static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count, |
457 | const void *arg) | 458 | const void *arg) |
458 | { | 459 | { |
459 | struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); | 460 | struct pci_dev *tdev = pci_get_slot(dev->bus, |
461 | PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); | ||
460 | ssize_t ret; | 462 | ssize_t ret; |
461 | 463 | ||
462 | if (!tdev) | 464 | if (!tdev) |
@@ -473,22 +475,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = { | |||
473 | .release = pci_vpd_pci22_release, | 475 | .release = pci_vpd_pci22_release, |
474 | }; | 476 | }; |
475 | 477 | ||
476 | static int pci_vpd_f0_dev_check(struct pci_dev *dev) | ||
477 | { | ||
478 | struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); | ||
479 | int ret = 0; | ||
480 | |||
481 | if (!tdev) | ||
482 | return -ENODEV; | ||
483 | if (!tdev->vpd || !tdev->multifunction || | ||
484 | dev->class != tdev->class || dev->vendor != tdev->vendor || | ||
485 | dev->device != tdev->device) | ||
486 | ret = -ENODEV; | ||
487 | |||
488 | pci_dev_put(tdev); | ||
489 | return ret; | ||
490 | } | ||
491 | |||
492 | int pci_vpd_pci22_init(struct pci_dev *dev) | 478 | int pci_vpd_pci22_init(struct pci_dev *dev) |
493 | { | 479 | { |
494 | struct pci_vpd_pci22 *vpd; | 480 | struct pci_vpd_pci22 *vpd; |
@@ -497,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev) | |||
497 | cap = pci_find_capability(dev, PCI_CAP_ID_VPD); | 483 | cap = pci_find_capability(dev, PCI_CAP_ID_VPD); |
498 | if (!cap) | 484 | if (!cap) |
499 | return -ENODEV; | 485 | return -ENODEV; |
500 | if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) { | ||
501 | int ret = pci_vpd_f0_dev_check(dev); | ||
502 | 486 | ||
503 | if (ret) | ||
504 | return ret; | ||
505 | } | ||
506 | vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC); | 487 | vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC); |
507 | if (!vpd) | 488 | if (!vpd) |
508 | return -ENOMEM; | 489 | return -ENOMEM; |
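Note: pci_get_slot() expects a devfn, but the old code handed it PCI_SLOT(dev->devfn), a bare slot number; PCI_DEVFN(PCI_SLOT(dev->devfn), 0) builds the devfn of function 0 in the same slot, which is what the VPD redirect needs. A worked example of the encoding (assumed values, macros from <linux/pci.h>):

#include <linux/pci.h>

/* devfn packs the slot in bits 7:3 and the function in bits 2:0. */
#define EXAMPLE_DEVFN	PCI_DEVFN(3, 2)			/* == 0x1a */

/* PCI_SLOT(0x1a) == 3 -- a slot number, not a valid devfn argument   */
/* PCI_DEVFN(PCI_SLOT(0x1a), 0) == 0x18 -- function 0 of the same slot */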
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 6fbd3f2b5992..d3346d23963b 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -256,6 +256,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx) | |||
256 | 256 | ||
257 | res->start = start; | 257 | res->start = start; |
258 | res->end = end; | 258 | res->end = end; |
259 | res->flags &= ~IORESOURCE_UNSET; | ||
260 | orig_res.flags &= ~IORESOURCE_UNSET; | ||
259 | dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n", | 261 | dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n", |
260 | &orig_res, res); | 262 | &orig_res, res); |
261 | 263 | ||
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index 81253e70b1c5..0aa81bd3de12 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c | |||
@@ -110,7 +110,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) | |||
110 | return -EINVAL; | 110 | return -EINVAL; |
111 | } | 111 | } |
112 | 112 | ||
113 | static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc) | 113 | static void ks_pcie_msi_irq_handler(struct irq_desc *desc) |
114 | { | 114 | { |
115 | unsigned int irq = irq_desc_get_irq(desc); | 115 | unsigned int irq = irq_desc_get_irq(desc); |
116 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); | 116 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); |
@@ -138,8 +138,7 @@ static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc) | |||
138 | * Traverse through pending legacy interrupts and invoke handler for each. Also | 138 | * Traverse through pending legacy interrupts and invoke handler for each. Also |
139 | * takes care of interrupt controller level mask/ack operation. | 139 | * takes care of interrupt controller level mask/ack operation. |
140 | */ | 140 | */ |
141 | static void ks_pcie_legacy_irq_handler(unsigned int __irq, | 141 | static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) |
142 | struct irq_desc *desc) | ||
143 | { | 142 | { |
144 | unsigned int irq = irq_desc_get_irq(desc); | 143 | unsigned int irq = irq_desc_get_irq(desc); |
145 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); | 144 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); |
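Note: chained flow handlers lose their "unsigned int irq" parameter throughout this series; only the descriptor is passed, and the number (when still needed) comes from irq_desc_get_irq(). A hedged sketch of the new prototype, with illustrative names:

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>

static void my_chained_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);	/* only if the number is needed */
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void *host_data = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	/* ... demux host_data and generic_handle_irq() each pending child ... */
	chained_irq_exit(chip, desc);
}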
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index 367e28fa7564..c4f64bfee551 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c | |||
@@ -362,6 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev) | |||
362 | static struct of_device_id rcar_pci_of_match[] = { | 362 | static struct of_device_id rcar_pci_of_match[] = { |
363 | { .compatible = "renesas,pci-r8a7790", }, | 363 | { .compatible = "renesas,pci-r8a7790", }, |
364 | { .compatible = "renesas,pci-r8a7791", }, | 364 | { .compatible = "renesas,pci-r8a7791", }, |
365 | { .compatible = "renesas,pci-r8a7794", }, | ||
365 | { }, | 366 | { }, |
366 | }; | 367 | }; |
367 | 368 | ||
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c index 996327cfa1e1..e491681daf22 100644 --- a/drivers/pci/host/pci-xgene-msi.c +++ b/drivers/pci/host/pci-xgene-msi.c | |||
@@ -295,7 +295,7 @@ static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) | |||
295 | return 0; | 295 | return 0; |
296 | } | 296 | } |
297 | 297 | ||
298 | static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc) | 298 | static void xgene_msi_isr(struct irq_desc *desc) |
299 | { | 299 | { |
300 | struct irq_chip *chip = irq_desc_get_chip(desc); | 300 | struct irq_chip *chip = irq_desc_get_chip(desc); |
301 | struct xgene_msi_group *msi_groups; | 301 | struct xgene_msi_group *msi_groups; |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index d4497141d083..4a7da3c3e035 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -1243,6 +1243,10 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) | |||
1243 | BUG_ON(!chip); | 1243 | BUG_ON(!chip); |
1244 | if (!chip->irq_write_msi_msg) | 1244 | if (!chip->irq_write_msi_msg) |
1245 | chip->irq_write_msi_msg = pci_msi_domain_write_msg; | 1245 | chip->irq_write_msi_msg = pci_msi_domain_write_msg; |
1246 | if (!chip->irq_mask) | ||
1247 | chip->irq_mask = pci_msi_mask_irq; | ||
1248 | if (!chip->irq_unmask) | ||
1249 | chip->irq_unmask = pci_msi_unmask_irq; | ||
1246 | } | 1250 | } |
1247 | 1251 | ||
1248 | /** | 1252 | /** |
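Note: with irq_mask/irq_unmask defaulted by pci_msi_domain_update_chip_ops(), an MSI irq_chip supplied through msi_domain_info can leave those callbacks NULL. A hedged sketch (chip name and ack callback are illustrative):

#include <linux/irq.h>
#include <linux/msi.h>

static struct irq_chip my_pci_msi_chip = {
	.name		= "my-msi",
	.irq_ack	= irq_chip_ack_parent,
	/* .irq_mask / .irq_unmask left NULL: the core now fills them in
	 * with pci_msi_mask_irq() / pci_msi_unmask_irq(). */
};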
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index dd652f2ae03d..108a3118ace7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi) | |||
299 | * Unbound PCI devices are always put in D0, regardless of | 299 | * Unbound PCI devices are always put in D0, regardless of |
300 | * runtime PM status. During probe, the device is set to | 300 | * runtime PM status. During probe, the device is set to |
301 | * active and the usage count is incremented. If the driver | 301 | * active and the usage count is incremented. If the driver |
302 | * supports runtime PM, it should call pm_runtime_put_noidle() | 302 | * supports runtime PM, it should call pm_runtime_put_noidle(), |
303 | * in its probe routine and pm_runtime_get_noresume() in its | 303 | * or any other runtime PM helper function decrementing the usage |
304 | * remove routine. | 304 | * count, in its probe routine and pm_runtime_get_noresume() in |
305 | * its remove routine. | ||
305 | */ | 306 | */ |
306 | pm_runtime_get_sync(dev); | 307 | pm_runtime_get_sync(dev); |
307 | pci_dev->driver = pci_drv; | 308 | pci_dev->driver = pci_drv; |
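Note: the expanded comment describes the usual pairing for PCI drivers that support runtime PM; a hedged sketch of what such a driver's probe/remove might do (the callbacks are illustrative, not part of this series):

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* ... device setup ... */

	/* Drop the usage count taken by the PCI core before probe so the
	 * device is allowed to runtime-suspend; any helper that decrements
	 * the count works equally well. */
	pm_runtime_put_noidle(&pdev->dev);
	return 0;
}

static void my_pci_remove(struct pci_dev *pdev)
{
	/* Re-take the reference so the core's balancing put after remove
	 * stays correct. */
	pm_runtime_get_noresume(&pdev->dev);
	/* ... device teardown ... */
}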
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 0b2be174d981..8361d27e5eca 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -676,15 +676,20 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus) | |||
676 | static void pci_set_bus_msi_domain(struct pci_bus *bus) | 676 | static void pci_set_bus_msi_domain(struct pci_bus *bus) |
677 | { | 677 | { |
678 | struct irq_domain *d; | 678 | struct irq_domain *d; |
679 | struct pci_bus *b; | ||
679 | 680 | ||
680 | /* | 681 | /* |
681 | * Either bus is the root, and we must obtain it from the | 682 | * The bus can be a root bus, a subordinate bus, or a virtual bus |
682 | * firmware, or we inherit it from the bridge device. | 683 | * created by an SR-IOV device. Walk up to the first bridge device |
684 | * found or derive the domain from the host bridge. | ||
683 | */ | 685 | */ |
684 | if (pci_is_root_bus(bus)) | 686 | for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) { |
685 | d = pci_host_bridge_msi_domain(bus); | 687 | if (b->self) |
686 | else | 688 | d = dev_get_msi_domain(&b->self->dev); |
687 | d = dev_get_msi_domain(&bus->self->dev); | 689 | } |
690 | |||
691 | if (!d) | ||
692 | d = pci_host_bridge_msi_domain(b); | ||
688 | 693 | ||
689 | dev_set_msi_domain(&bus->dev, d); | 694 | dev_set_msi_domain(&bus->dev, d); |
690 | } | 695 | } |
@@ -855,9 +860,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) | |||
855 | child->bridge_ctl = bctl; | 860 | child->bridge_ctl = bctl; |
856 | } | 861 | } |
857 | 862 | ||
858 | /* Read and initialize bridge resources */ | ||
859 | pci_read_bridge_bases(child); | ||
860 | |||
861 | cmax = pci_scan_child_bus(child); | 863 | cmax = pci_scan_child_bus(child); |
862 | if (cmax > subordinate) | 864 | if (cmax > subordinate) |
863 | dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n", | 865 | dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n", |
@@ -918,9 +920,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) | |||
918 | 920 | ||
919 | if (!is_cardbus) { | 921 | if (!is_cardbus) { |
920 | child->bridge_ctl = bctl; | 922 | child->bridge_ctl = bctl; |
921 | |||
922 | /* Read and initialize bridge resources */ | ||
923 | pci_read_bridge_bases(child); | ||
924 | max = pci_scan_child_bus(child); | 923 | max = pci_scan_child_bus(child); |
925 | } else { | 924 | } else { |
926 | /* | 925 | /* |
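Note: pci_set_bus_msi_domain() now handles root, subordinate and SR-IOV virtual buses uniformly by walking up the bus hierarchy until a bridge provides a domain. The added loop, isolated as a hedged sketch so the control flow is easier to read (helper name is illustrative; pci_host_bridge_msi_domain() is static to probe.c):

#include <linux/pci.h>
#include <linux/msi.h>

static struct irq_domain *find_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d = NULL;
	struct pci_bus *b;

	/* A virtual (SR-IOV) or subordinate bus inherits the domain from
	 * the first upstream bridge device that carries one. */
	for (b = bus; !d && !pci_is_root_bus(b); b = b->parent)
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);

	/* Root bus reached without a match: fall back to the host bridge. */
	return d ? d : pci_host_bridge_msi_domain(b);
}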
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6a30252cd79f..b03373fd05ca 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1907,11 +1907,27 @@ static void quirk_netmos(struct pci_dev *dev) | |||
1907 | DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, | 1907 | DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, |
1908 | PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos); | 1908 | PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos); |
1909 | 1909 | ||
1910 | /* | ||
1911 | * Quirk non-zero PCI functions to route VPD access through function 0 for | ||
1912 | * devices that share VPD resources between functions. The functions are | ||
1913 | * expected to be identical devices. | ||
1914 | */ | ||
1910 | static void quirk_f0_vpd_link(struct pci_dev *dev) | 1915 | static void quirk_f0_vpd_link(struct pci_dev *dev) |
1911 | { | 1916 | { |
1912 | if (!dev->multifunction || !PCI_FUNC(dev->devfn)) | 1917 | struct pci_dev *f0; |
1918 | |||
1919 | if (!PCI_FUNC(dev->devfn)) | ||
1913 | return; | 1920 | return; |
1914 | dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0; | 1921 | |
1922 | f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); | ||
1923 | if (!f0) | ||
1924 | return; | ||
1925 | |||
1926 | if (f0->vpd && dev->class == f0->class && | ||
1927 | dev->vendor == f0->vendor && dev->device == f0->device) | ||
1928 | dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0; | ||
1929 | |||
1930 | pci_dev_put(f0); | ||
1915 | } | 1931 | } |
1916 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, | 1932 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, |
1917 | PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link); | 1933 | PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link); |
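Note: with PCI_DEV_FLAGS_VPD_REF_F0 set by the quirk, VPD access on a non-zero function is redirected to function 0 inside pci_vpd_f0_read()/pci_vpd_f0_write() (see the access.c hunk above); consumers keep using the ordinary accessor. A hedged usage sketch:

#include <linux/pci.h>

static void dump_vpd_len(struct pci_dev *pdev)
{
	u8 buf[64];
	ssize_t len;

	/* Same call as before the quirk; the function-0 redirect is
	 * transparent to the caller. */
	len = pci_read_vpd(pdev, 0, sizeof(buf), buf);
	if (len > 0)
		dev_info(&pdev->dev, "read %zd bytes of VPD\n", len);
}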
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c index 0062027afb1e..77a2e054fdea 100644 --- a/drivers/phy/phy-berlin-sata.c +++ b/drivers/phy/phy-berlin-sata.c | |||
@@ -276,6 +276,7 @@ static const struct of_device_id phy_berlin_sata_of_match[] = { | |||
276 | { .compatible = "marvell,berlin2q-sata-phy" }, | 276 | { .compatible = "marvell,berlin2q-sata-phy" }, |
277 | { }, | 277 | { }, |
278 | }; | 278 | }; |
279 | MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match); | ||
279 | 280 | ||
280 | static struct platform_driver phy_berlin_sata_driver = { | 281 | static struct platform_driver phy_berlin_sata_driver = { |
281 | .probe = phy_berlin_sata_probe, | 282 | .probe = phy_berlin_sata_probe, |
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c index 49a1ed0cef56..107cb57c3513 100644 --- a/drivers/phy/phy-qcom-ufs.c +++ b/drivers/phy/phy-qcom-ufs.c | |||
@@ -432,6 +432,7 @@ out_disable_src: | |||
432 | out: | 432 | out: |
433 | return ret; | 433 | return ret; |
434 | } | 434 | } |
435 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk); | ||
435 | 436 | ||
436 | static | 437 | static |
437 | int ufs_qcom_phy_disable_vreg(struct phy *phy, | 438 | int ufs_qcom_phy_disable_vreg(struct phy *phy, |
@@ -474,6 +475,7 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy) | |||
474 | phy->is_ref_clk_enabled = false; | 475 | phy->is_ref_clk_enabled = false; |
475 | } | 476 | } |
476 | } | 477 | } |
478 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk); | ||
477 | 479 | ||
478 | #define UFS_REF_CLK_EN (1 << 5) | 480 | #define UFS_REF_CLK_EN (1 << 5) |
479 | 481 | ||
@@ -517,11 +519,13 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy) | |||
517 | { | 519 | { |
518 | ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true); | 520 | ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true); |
519 | } | 521 | } |
522 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk); | ||
520 | 523 | ||
521 | void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) | 524 | void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) |
522 | { | 525 | { |
523 | ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false); | 526 | ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false); |
524 | } | 527 | } |
528 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk); | ||
525 | 529 | ||
526 | /* Turn ON M-PHY RMMI interface clocks */ | 530 | /* Turn ON M-PHY RMMI interface clocks */ |
527 | int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) | 531 | int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) |
@@ -550,6 +554,7 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) | |||
550 | out: | 554 | out: |
551 | return ret; | 555 | return ret; |
552 | } | 556 | } |
557 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk); | ||
553 | 558 | ||
554 | /* Turn OFF M-PHY RMMI interface clocks */ | 559 | /* Turn OFF M-PHY RMMI interface clocks */ |
555 | void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) | 560 | void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) |
@@ -562,6 +567,7 @@ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) | |||
562 | phy->is_iface_clk_enabled = false; | 567 | phy->is_iface_clk_enabled = false; |
563 | } | 568 | } |
564 | } | 569 | } |
570 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk); | ||
565 | 571 | ||
566 | int ufs_qcom_phy_start_serdes(struct phy *generic_phy) | 572 | int ufs_qcom_phy_start_serdes(struct phy *generic_phy) |
567 | { | 573 | { |
@@ -578,6 +584,7 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy) | |||
578 | 584 | ||
579 | return ret; | 585 | return ret; |
580 | } | 586 | } |
587 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes); | ||
581 | 588 | ||
582 | int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) | 589 | int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) |
583 | { | 590 | { |
@@ -595,6 +602,7 @@ int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) | |||
595 | 602 | ||
596 | return ret; | 603 | return ret; |
597 | } | 604 | } |
605 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable); | ||
598 | 606 | ||
599 | void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, | 607 | void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, |
600 | u8 major, u16 minor, u16 step) | 608 | u8 major, u16 minor, u16 step) |
@@ -605,6 +613,7 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, | |||
605 | ufs_qcom_phy->host_ctrl_rev_minor = minor; | 613 | ufs_qcom_phy->host_ctrl_rev_minor = minor; |
606 | ufs_qcom_phy->host_ctrl_rev_step = step; | 614 | ufs_qcom_phy->host_ctrl_rev_step = step; |
607 | } | 615 | } |
616 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version); | ||
608 | 617 | ||
609 | int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) | 618 | int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) |
610 | { | 619 | { |
@@ -625,6 +634,7 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) | |||
625 | 634 | ||
626 | return ret; | 635 | return ret; |
627 | } | 636 | } |
637 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy); | ||
628 | 638 | ||
629 | int ufs_qcom_phy_remove(struct phy *generic_phy, | 639 | int ufs_qcom_phy_remove(struct phy *generic_phy, |
630 | struct ufs_qcom_phy *ufs_qcom_phy) | 640 | struct ufs_qcom_phy *ufs_qcom_phy) |
@@ -662,6 +672,7 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy) | |||
662 | return ufs_qcom_phy->phy_spec_ops-> | 672 | return ufs_qcom_phy->phy_spec_ops-> |
663 | is_physical_coding_sublayer_ready(ufs_qcom_phy); | 673 | is_physical_coding_sublayer_ready(ufs_qcom_phy); |
664 | } | 674 | } |
675 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready); | ||
665 | 676 | ||
666 | int ufs_qcom_phy_power_on(struct phy *generic_phy) | 677 | int ufs_qcom_phy_power_on(struct phy *generic_phy) |
667 | { | 678 | { |
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c index 5a5c073e72fe..91d6f342c565 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c | |||
@@ -98,6 +98,7 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) | |||
98 | struct device_node *child; | 98 | struct device_node *child; |
99 | struct regmap *grf; | 99 | struct regmap *grf; |
100 | unsigned int reg_offset; | 100 | unsigned int reg_offset; |
101 | int err; | ||
101 | 102 | ||
102 | grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); | 103 | grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); |
103 | if (IS_ERR(grf)) { | 104 | if (IS_ERR(grf)) { |
@@ -129,6 +130,11 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) | |||
129 | return PTR_ERR(rk_phy->phy); | 130 | return PTR_ERR(rk_phy->phy); |
130 | } | 131 | } |
131 | phy_set_drvdata(rk_phy->phy, rk_phy); | 132 | phy_set_drvdata(rk_phy->phy, rk_phy); |
133 | |||
134 | /* only power up usb phy when it is in use, so disable it at init */ | ||
135 | err = rockchip_usb_phy_power(rk_phy, 1); | ||
136 | if (err) | ||
137 | return err; | ||
132 | } | 138 | } |
133 | 139 | ||
134 | phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); | 140 | phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); |
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c index 7d9482bf8252..1ca783098e47 100644 --- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c | |||
@@ -143,7 +143,7 @@ static inline bool cygnus_get_bit(struct cygnus_gpio *chip, unsigned int reg, | |||
143 | return !!(readl(chip->base + offset) & BIT(shift)); | 143 | return !!(readl(chip->base + offset) & BIT(shift)); |
144 | } | 144 | } |
145 | 145 | ||
146 | static void cygnus_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 146 | static void cygnus_gpio_irq_handler(struct irq_desc *desc) |
147 | { | 147 | { |
148 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 148 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
149 | struct cygnus_gpio *chip = to_cygnus_gpio(gc); | 149 | struct cygnus_gpio *chip = to_cygnus_gpio(gc); |
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 69723e07036b..9638a00c67c2 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c | |||
@@ -349,6 +349,9 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio) | |||
349 | struct pinctrl_gpio_range *range = NULL; | 349 | struct pinctrl_gpio_range *range = NULL; |
350 | struct gpio_chip *chip = gpio_to_chip(gpio); | 350 | struct gpio_chip *chip = gpio_to_chip(gpio); |
351 | 351 | ||
352 | if (WARN(!chip, "no gpio_chip for gpio%i?", gpio)) | ||
353 | return false; | ||
354 | |||
352 | mutex_lock(&pinctrldev_list_mutex); | 355 | mutex_lock(&pinctrldev_list_mutex); |
353 | 356 | ||
354 | /* Loop over the pin controllers */ | 357 | /* Loop over the pin controllers */ |
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c index faf635654312..293ed4381cc0 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx25.c +++ b/drivers/pinctrl/freescale/pinctrl-imx25.c | |||
@@ -26,7 +26,8 @@ | |||
26 | #include "pinctrl-imx.h" | 26 | #include "pinctrl-imx.h" |
27 | 27 | ||
28 | enum imx25_pads { | 28 | enum imx25_pads { |
29 | MX25_PAD_RESERVE0 = 1, | 29 | MX25_PAD_RESERVE0 = 0, |
30 | MX25_PAD_RESERVE1 = 1, | ||
30 | MX25_PAD_A10 = 2, | 31 | MX25_PAD_A10 = 2, |
31 | MX25_PAD_A13 = 3, | 32 | MX25_PAD_A13 = 3, |
32 | MX25_PAD_A14 = 4, | 33 | MX25_PAD_A14 = 4, |
@@ -169,6 +170,7 @@ enum imx25_pads { | |||
169 | /* Pad names for the pinmux subsystem */ | 170 | /* Pad names for the pinmux subsystem */ |
170 | static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = { | 171 | static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = { |
171 | IMX_PINCTRL_PIN(MX25_PAD_RESERVE0), | 172 | IMX_PINCTRL_PIN(MX25_PAD_RESERVE0), |
173 | IMX_PINCTRL_PIN(MX25_PAD_RESERVE1), | ||
172 | IMX_PINCTRL_PIN(MX25_PAD_A10), | 174 | IMX_PINCTRL_PIN(MX25_PAD_A10), |
173 | IMX_PINCTRL_PIN(MX25_PAD_A13), | 175 | IMX_PINCTRL_PIN(MX25_PAD_A13), |
174 | IMX_PINCTRL_PIN(MX25_PAD_A14), | 176 | IMX_PINCTRL_PIN(MX25_PAD_A14), |
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index dac4865f3203..f79ea430f651 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c | |||
@@ -425,7 +425,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) | |||
425 | } | 425 | } |
426 | } | 426 | } |
427 | 427 | ||
428 | static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc) | 428 | static void byt_gpio_irq_handler(struct irq_desc *desc) |
429 | { | 429 | { |
430 | struct irq_data *data = irq_desc_get_irq_data(desc); | 430 | struct irq_data *data = irq_desc_get_irq_data(desc); |
431 | struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); | 431 | struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); |
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 2d5d3ddc36e5..270c127e03ea 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
@@ -1414,7 +1414,7 @@ static struct irq_chip chv_gpio_irqchip = { | |||
1414 | .flags = IRQCHIP_SKIP_SET_WAKE, | 1414 | .flags = IRQCHIP_SKIP_SET_WAKE, |
1415 | }; | 1415 | }; |
1416 | 1416 | ||
1417 | static void chv_gpio_irq_handler(unsigned irq, struct irq_desc *desc) | 1417 | static void chv_gpio_irq_handler(struct irq_desc *desc) |
1418 | { | 1418 | { |
1419 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 1419 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
1420 | struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc); | 1420 | struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc); |
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index bb377c110541..54848b8decef 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c | |||
@@ -836,7 +836,7 @@ static void intel_gpio_community_irq_handler(struct gpio_chip *gc, | |||
836 | } | 836 | } |
837 | } | 837 | } |
838 | 838 | ||
839 | static void intel_gpio_irq_handler(unsigned irq, struct irq_desc *desc) | 839 | static void intel_gpio_irq_handler(struct irq_desc *desc) |
840 | { | 840 | { |
841 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 841 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
842 | struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc); | 842 | struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc); |
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 7726c6caaf83..1b22f96ba839 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c | |||
@@ -1190,7 +1190,7 @@ mtk_eint_debounce_process(struct mtk_pinctrl *pctl, int index) | |||
1190 | } | 1190 | } |
1191 | } | 1191 | } |
1192 | 1192 | ||
1193 | static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc) | 1193 | static void mtk_eint_irq_handler(struct irq_desc *desc) |
1194 | { | 1194 | { |
1195 | struct irq_chip *chip = irq_desc_get_chip(desc); | 1195 | struct irq_chip *chip = irq_desc_get_chip(desc); |
1196 | struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc); | 1196 | struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc); |
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 352ede13a9e9..96cf03908e93 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c | |||
@@ -860,7 +860,7 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status) | |||
860 | chained_irq_exit(host_chip, desc); | 860 | chained_irq_exit(host_chip, desc); |
861 | } | 861 | } |
862 | 862 | ||
863 | static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 863 | static void nmk_gpio_irq_handler(struct irq_desc *desc) |
864 | { | 864 | { |
865 | struct gpio_chip *chip = irq_desc_get_handler_data(desc); | 865 | struct gpio_chip *chip = irq_desc_get_handler_data(desc); |
866 | struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); | 866 | struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); |
@@ -873,7 +873,7 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
873 | __nmk_gpio_irq_handler(desc, status); | 873 | __nmk_gpio_irq_handler(desc, status); |
874 | } | 874 | } |
875 | 875 | ||
876 | static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc) | 876 | static void nmk_gpio_latent_irq_handler(struct irq_desc *desc) |
877 | { | 877 | { |
878 | struct gpio_chip *chip = irq_desc_get_handler_data(desc); | 878 | struct gpio_chip *chip = irq_desc_get_handler_data(desc); |
879 | struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); | 879 | struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); |
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c index a5976ebc4482..f6be68518c87 100644 --- a/drivers/pinctrl/pinctrl-adi2.c +++ b/drivers/pinctrl/pinctrl-adi2.c | |||
@@ -530,8 +530,7 @@ static inline void preflow_handler(struct irq_desc *desc) | |||
530 | static inline void preflow_handler(struct irq_desc *desc) { } | 530 | static inline void preflow_handler(struct irq_desc *desc) { } |
531 | #endif | 531 | #endif |
532 | 532 | ||
533 | static void adi_gpio_handle_pint_irq(unsigned int inta_irq, | 533 | static void adi_gpio_handle_pint_irq(struct irq_desc *desc) |
534 | struct irq_desc *desc) | ||
535 | { | 534 | { |
536 | u32 request; | 535 | u32 request; |
537 | u32 level_mask, hwirq; | 536 | u32 level_mask, hwirq; |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 5e86bb8ca80e..3318f1d6193c 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c | |||
@@ -492,15 +492,15 @@ static struct irq_chip amd_gpio_irqchip = { | |||
492 | .irq_set_type = amd_gpio_irq_set_type, | 492 | .irq_set_type = amd_gpio_irq_set_type, |
493 | }; | 493 | }; |
494 | 494 | ||
495 | static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) | 495 | static void amd_gpio_irq_handler(struct irq_desc *desc) |
496 | { | 496 | { |
497 | unsigned int irq = irq_desc_get_irq(desc); | ||
498 | u32 i; | 497 | u32 i; |
499 | u32 off; | 498 | u32 off; |
500 | u32 reg; | 499 | u32 reg; |
501 | u32 pin_reg; | 500 | u32 pin_reg; |
502 | u64 reg64; | 501 | u64 reg64; |
503 | int handled = 0; | 502 | int handled = 0; |
503 | unsigned int irq; | ||
504 | unsigned long flags; | 504 | unsigned long flags; |
505 | struct irq_chip *chip = irq_desc_get_chip(desc); | 505 | struct irq_chip *chip = irq_desc_get_chip(desc); |
506 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 506 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
@@ -541,7 +541,7 @@ static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) | |||
541 | } | 541 | } |
542 | 542 | ||
543 | if (handled == 0) | 543 | if (handled == 0) |
544 | handle_bad_irq(irq, desc); | 544 | handle_bad_irq(desc); |
545 | 545 | ||
546 | spin_lock_irqsave(&gpio_dev->lock, flags); | 546 | spin_lock_irqsave(&gpio_dev->lock, flags); |
547 | reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); | 547 | reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); |
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index bae0012ee356..b0fde0f385e6 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c | |||
@@ -1585,7 +1585,7 @@ static struct irq_chip gpio_irqchip = { | |||
1585 | .irq_set_wake = gpio_irq_set_wake, | 1585 | .irq_set_wake = gpio_irq_set_wake, |
1586 | }; | 1586 | }; |
1587 | 1587 | ||
1588 | static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) | 1588 | static void gpio_irq_handler(struct irq_desc *desc) |
1589 | { | 1589 | { |
1590 | struct irq_chip *chip = irq_desc_get_chip(desc); | 1590 | struct irq_chip *chip = irq_desc_get_chip(desc); |
1591 | struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc); | 1591 | struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc); |
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c index 3731cc67a88b..9c9b88934bcc 100644 --- a/drivers/pinctrl/pinctrl-coh901.c +++ b/drivers/pinctrl/pinctrl-coh901.c | |||
@@ -519,7 +519,7 @@ static struct irq_chip u300_gpio_irqchip = { | |||
519 | .irq_set_type = u300_gpio_irq_type, | 519 | .irq_set_type = u300_gpio_irq_type, |
520 | }; | 520 | }; |
521 | 521 | ||
522 | static void u300_gpio_irq_handler(unsigned __irq, struct irq_desc *desc) | 522 | static void u300_gpio_irq_handler(struct irq_desc *desc) |
523 | { | 523 | { |
524 | unsigned int irq = irq_desc_get_irq(desc); | 524 | unsigned int irq = irq_desc_get_irq(desc); |
525 | struct irq_chip *parent_chip = irq_desc_get_chip(desc); | 525 | struct irq_chip *parent_chip = irq_desc_get_chip(desc); |
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c index 461fffc4c62a..11f8b835d3b6 100644 --- a/drivers/pinctrl/pinctrl-digicolor.c +++ b/drivers/pinctrl/pinctrl-digicolor.c | |||
@@ -337,9 +337,9 @@ static int dc_pinctrl_probe(struct platform_device *pdev) | |||
337 | pmap->dev = &pdev->dev; | 337 | pmap->dev = &pdev->dev; |
338 | 338 | ||
339 | pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap); | 339 | pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap); |
340 | if (!pmap->pctl) { | 340 | if (IS_ERR(pmap->pctl)) { |
341 | dev_err(&pdev->dev, "pinctrl driver registration failed\n"); | 341 | dev_err(&pdev->dev, "pinctrl driver registration failed\n"); |
342 | return -EINVAL; | 342 | return PTR_ERR(pmap->pctl); |
343 | } | 343 | } |
344 | 344 | ||
345 | ret = dc_gpiochip_add(pmap, pdev->dev.of_node); | 345 | ret = dc_gpiochip_add(pmap, pdev->dev.of_node); |
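Note: pinctrl_register() returns an ERR_PTR-encoded error rather than NULL, so callers here and in the pm8xxx drivers below switch to IS_ERR()/PTR_ERR(). A hedged sketch of the updated check (descriptor and probe function are illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>

static struct pinctrl_desc my_pctl_desc;

static int my_pinctrl_probe(struct platform_device *pdev)
{
	struct pinctrl_dev *pctl;

	pctl = pinctrl_register(&my_pctl_desc, &pdev->dev, NULL);
	if (IS_ERR(pctl))
		return PTR_ERR(pctl);	/* was: if (!pctl) return -EINVAL; */

	platform_set_drvdata(pdev, pctl);
	return 0;
}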
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index 3dc2ae15f3a1..952b1c623887 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c | |||
@@ -1303,20 +1303,18 @@ static int pistachio_gpio_irq_set_type(struct irq_data *data, unsigned int type) | |||
1303 | } | 1303 | } |
1304 | 1304 | ||
1305 | if (type & IRQ_TYPE_LEVEL_MASK) | 1305 | if (type & IRQ_TYPE_LEVEL_MASK) |
1306 | __irq_set_handler_locked(data->irq, handle_level_irq); | 1306 | irq_set_handler_locked(data, handle_level_irq); |
1307 | else | 1307 | else |
1308 | __irq_set_handler_locked(data->irq, handle_edge_irq); | 1308 | irq_set_handler_locked(data, handle_edge_irq); |
1309 | 1309 | ||
1310 | return 0; | 1310 | return 0; |
1311 | } | 1311 | } |
1312 | 1312 | ||
1313 | static void pistachio_gpio_irq_handler(unsigned int __irq, | 1313 | static void pistachio_gpio_irq_handler(struct irq_desc *desc) |
1314 | struct irq_desc *desc) | ||
1315 | { | 1314 | { |
1316 | unsigned int irq = irq_desc_get_irq(desc); | ||
1317 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 1315 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
1318 | struct pistachio_gpio_bank *bank = gc_to_bank(gc); | 1316 | struct pistachio_gpio_bank *bank = gc_to_bank(gc); |
1319 | struct irq_chip *chip = irq_get_chip(irq); | 1317 | struct irq_chip *chip = irq_desc_get_chip(desc); |
1320 | unsigned long pending; | 1318 | unsigned long pending; |
1321 | unsigned int pin; | 1319 | unsigned int pin; |
1322 | 1320 | ||
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index c5246c05f70c..88bb707e107a 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c | |||
@@ -1475,7 +1475,7 @@ static const struct gpio_chip rockchip_gpiolib_chip = { | |||
1475 | * Interrupt handling | 1475 | * Interrupt handling |
1476 | */ | 1476 | */ |
1477 | 1477 | ||
1478 | static void rockchip_irq_demux(unsigned int __irq, struct irq_desc *desc) | 1478 | static void rockchip_irq_demux(struct irq_desc *desc) |
1479 | { | 1479 | { |
1480 | struct irq_chip *chip = irq_desc_get_chip(desc); | 1480 | struct irq_chip *chip = irq_desc_get_chip(desc); |
1481 | struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc); | 1481 | struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc); |
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index bf548c2a7a9d..ef04b962c3d5 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c | |||
@@ -1679,7 +1679,7 @@ static irqreturn_t pcs_irq_handler(int irq, void *d) | |||
1679 | * Use this if you have a separate interrupt for each | 1679 | * Use this if you have a separate interrupt for each |
1680 | * pinctrl-single instance. | 1680 | * pinctrl-single instance. |
1681 | */ | 1681 | */ |
1682 | static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc) | 1682 | static void pcs_irq_chain_handler(struct irq_desc *desc) |
1683 | { | 1683 | { |
1684 | struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc); | 1684 | struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc); |
1685 | struct irq_chip *chip; | 1685 | struct irq_chip *chip; |
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index f8338d2e6b6b..389526e704fb 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c | |||
@@ -1460,7 +1460,7 @@ static void __gpio_irq_handler(struct st_gpio_bank *bank) | |||
1460 | } | 1460 | } |
1461 | } | 1461 | } |
1462 | 1462 | ||
1463 | static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc) | 1463 | static void st_gpio_irq_handler(struct irq_desc *desc) |
1464 | { | 1464 | { |
1465 | /* interrupt dedicated per bank */ | 1465 | /* interrupt dedicated per bank */ |
1466 | struct irq_chip *chip = irq_desc_get_chip(desc); | 1466 | struct irq_chip *chip = irq_desc_get_chip(desc); |
@@ -1472,7 +1472,7 @@ static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
1472 | chained_irq_exit(chip, desc); | 1472 | chained_irq_exit(chip, desc); |
1473 | } | 1473 | } |
1474 | 1474 | ||
1475 | static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc) | 1475 | static void st_gpio_irqmux_handler(struct irq_desc *desc) |
1476 | { | 1476 | { |
1477 | struct irq_chip *chip = irq_desc_get_chip(desc); | 1477 | struct irq_chip *chip = irq_desc_get_chip(desc); |
1478 | struct st_pinctrl *info = irq_desc_get_handler_data(desc); | 1478 | struct st_pinctrl *info = irq_desc_get_handler_data(desc); |
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 67e08cb315c4..29984b36926a 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c | |||
@@ -313,8 +313,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev, | |||
313 | 313 | ||
314 | /* See if this pctldev has this function */ | 314 | /* See if this pctldev has this function */ |
315 | while (selector < nfuncs) { | 315 | while (selector < nfuncs) { |
316 | const char *fname = ops->get_function_name(pctldev, | 316 | const char *fname = ops->get_function_name(pctldev, selector); |
317 | selector); | ||
318 | 317 | ||
319 | if (!strcmp(function, fname)) | 318 | if (!strcmp(function, fname)) |
320 | return selector; | 319 | return selector; |
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 492cdd51dc5c..a0c7407c1cac 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c | |||
@@ -765,9 +765,8 @@ static struct irq_chip msm_gpio_irq_chip = { | |||
765 | .irq_set_wake = msm_gpio_irq_set_wake, | 765 | .irq_set_wake = msm_gpio_irq_set_wake, |
766 | }; | 766 | }; |
767 | 767 | ||
768 | static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) | 768 | static void msm_gpio_irq_handler(struct irq_desc *desc) |
769 | { | 769 | { |
770 | unsigned int irq = irq_desc_get_irq(desc); | ||
771 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 770 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
772 | const struct msm_pingroup *g; | 771 | const struct msm_pingroup *g; |
773 | struct msm_pinctrl *pctrl = to_msm_pinctrl(gc); | 772 | struct msm_pinctrl *pctrl = to_msm_pinctrl(gc); |
@@ -795,7 +794,7 @@ static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) | |||
795 | 794 | ||
796 | /* No interrupts were flagged */ | 795 | /* No interrupts were flagged */ |
797 | if (handled == 0) | 796 | if (handled == 0) |
798 | handle_bad_irq(irq, desc); | 797 | handle_bad_irq(desc); |
799 | 798 | ||
800 | chained_irq_exit(chip, desc); | 799 | chained_irq_exit(chip, desc); |
801 | } | 800 | } |
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index c978b311031b..e1a3721bc8e5 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c | |||
@@ -723,9 +723,9 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) | |||
723 | #endif | 723 | #endif |
724 | 724 | ||
725 | pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl); | 725 | pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl); |
726 | if (!pctrl->pctrl) { | 726 | if (IS_ERR(pctrl->pctrl)) { |
727 | dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n"); | 727 | dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n"); |
728 | return -ENODEV; | 728 | return PTR_ERR(pctrl->pctrl); |
729 | } | 729 | } |
730 | 730 | ||
731 | pctrl->chip = pm8xxx_gpio_template; | 731 | pctrl->chip = pm8xxx_gpio_template; |
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c index 2d1b69f171be..6652b8d7f707 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c | |||
@@ -814,9 +814,9 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev) | |||
814 | #endif | 814 | #endif |
815 | 815 | ||
816 | pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl); | 816 | pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl); |
817 | if (!pctrl->pctrl) { | 817 | if (IS_ERR(pctrl->pctrl)) { |
818 | dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n"); | 818 | dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n"); |
819 | return -ENODEV; | 819 | return PTR_ERR(pctrl->pctrl); |
820 | } | 820 | } |
821 | 821 | ||
822 | pctrl->chip = pm8xxx_mpp_template; | 822 | pctrl->chip = pm8xxx_mpp_template; |
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 5f45caaef46d..71ccf6a90b22 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c | |||
@@ -419,7 +419,7 @@ static const struct of_device_id exynos_wkup_irq_ids[] = { | |||
419 | }; | 419 | }; |
420 | 420 | ||
421 | /* interrupt handler for wakeup interrupts 0..15 */ | 421 | /* interrupt handler for wakeup interrupts 0..15 */ |
422 | static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc) | 422 | static void exynos_irq_eint0_15(struct irq_desc *desc) |
423 | { | 423 | { |
424 | struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc); | 424 | struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc); |
425 | struct samsung_pin_bank *bank = eintd->bank; | 425 | struct samsung_pin_bank *bank = eintd->bank; |
@@ -451,7 +451,7 @@ static inline void exynos_irq_demux_eint(unsigned long pend, | |||
451 | } | 451 | } |
452 | 452 | ||
453 | /* interrupt handler for wakeup interrupt 16 */ | 453 | /* interrupt handler for wakeup interrupt 16 */ |
454 | static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) | 454 | static void exynos_irq_demux_eint16_31(struct irq_desc *desc) |
455 | { | 455 | { |
456 | struct irq_chip *chip = irq_desc_get_chip(desc); | 456 | struct irq_chip *chip = irq_desc_get_chip(desc); |
457 | struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc); | 457 | struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc); |
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index 019844d479bb..3d92f827da7a 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c | |||
@@ -240,7 +240,7 @@ static struct irq_chip s3c2410_eint0_3_chip = { | |||
240 | .irq_set_type = s3c24xx_eint_type, | 240 | .irq_set_type = s3c24xx_eint_type, |
241 | }; | 241 | }; |
242 | 242 | ||
243 | static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc) | 243 | static void s3c2410_demux_eint0_3(struct irq_desc *desc) |
244 | { | 244 | { |
245 | struct irq_data *data = irq_desc_get_irq_data(desc); | 245 | struct irq_data *data = irq_desc_get_irq_data(desc); |
246 | struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc); | 246 | struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc); |
@@ -295,7 +295,7 @@ static struct irq_chip s3c2412_eint0_3_chip = { | |||
295 | .irq_set_type = s3c24xx_eint_type, | 295 | .irq_set_type = s3c24xx_eint_type, |
296 | }; | 296 | }; |
297 | 297 | ||
298 | static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc) | 298 | static void s3c2412_demux_eint0_3(struct irq_desc *desc) |
299 | { | 299 | { |
300 | struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc); | 300 | struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc); |
301 | struct irq_data *data = irq_desc_get_irq_data(desc); | 301 | struct irq_data *data = irq_desc_get_irq_data(desc); |
@@ -361,7 +361,7 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc, | |||
361 | u32 offset, u32 range) | 361 | u32 offset, u32 range) |
362 | { | 362 | { |
363 | struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc); | 363 | struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc); |
364 | struct irq_chip *chip = irq_desc_get_irq_chip(desc); | 364 | struct irq_chip *chip = irq_desc_get_chip(desc); |
365 | struct samsung_pinctrl_drv_data *d = data->drvdata; | 365 | struct samsung_pinctrl_drv_data *d = data->drvdata; |
366 | unsigned int pend, mask; | 366 | unsigned int pend, mask; |
367 | 367 | ||
@@ -388,12 +388,12 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc, | |||
388 | chained_irq_exit(chip, desc); | 388 | chained_irq_exit(chip, desc); |
389 | } | 389 | } |
390 | 390 | ||
391 | static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc) | 391 | static void s3c24xx_demux_eint4_7(struct irq_desc *desc) |
392 | { | 392 | { |
393 | s3c24xx_demux_eint(desc, 0, 0xf0); | 393 | s3c24xx_demux_eint(desc, 0, 0xf0); |
394 | } | 394 | } |
395 | 395 | ||
396 | static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc) | 396 | static void s3c24xx_demux_eint8_23(struct irq_desc *desc) |
397 | { | 397 | { |
398 | s3c24xx_demux_eint(desc, 8, 0xffff00); | 398 | s3c24xx_demux_eint(desc, 8, 0xffff00); |
399 | } | 399 | } |
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index f5ea40a69711..43407ab248f5 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c | |||
@@ -407,7 +407,7 @@ static const struct irq_domain_ops s3c64xx_gpio_irqd_ops = { | |||
407 | .xlate = irq_domain_xlate_twocell, | 407 | .xlate = irq_domain_xlate_twocell, |
408 | }; | 408 | }; |
409 | 409 | ||
410 | static void s3c64xx_eint_gpio_irq(unsigned int irq, struct irq_desc *desc) | 410 | static void s3c64xx_eint_gpio_irq(struct irq_desc *desc) |
411 | { | 411 | { |
412 | struct irq_chip *chip = irq_desc_get_chip(desc); | 412 | struct irq_chip *chip = irq_desc_get_chip(desc); |
413 | struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc); | 413 | struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc); |
@@ -631,22 +631,22 @@ static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range) | |||
631 | chained_irq_exit(chip, desc); | 631 | chained_irq_exit(chip, desc); |
632 | } | 632 | } |
633 | 633 | ||
634 | static void s3c64xx_demux_eint0_3(unsigned int irq, struct irq_desc *desc) | 634 | static void s3c64xx_demux_eint0_3(struct irq_desc *desc) |
635 | { | 635 | { |
636 | s3c64xx_irq_demux_eint(desc, 0xf); | 636 | s3c64xx_irq_demux_eint(desc, 0xf); |
637 | } | 637 | } |
638 | 638 | ||
639 | static void s3c64xx_demux_eint4_11(unsigned int irq, struct irq_desc *desc) | 639 | static void s3c64xx_demux_eint4_11(struct irq_desc *desc) |
640 | { | 640 | { |
641 | s3c64xx_irq_demux_eint(desc, 0xff0); | 641 | s3c64xx_irq_demux_eint(desc, 0xff0); |
642 | } | 642 | } |
643 | 643 | ||
644 | static void s3c64xx_demux_eint12_19(unsigned int irq, struct irq_desc *desc) | 644 | static void s3c64xx_demux_eint12_19(struct irq_desc *desc) |
645 | { | 645 | { |
646 | s3c64xx_irq_demux_eint(desc, 0xff000); | 646 | s3c64xx_irq_demux_eint(desc, 0xff000); |
647 | } | 647 | } |
648 | 648 | ||
649 | static void s3c64xx_demux_eint20_27(unsigned int irq, struct irq_desc *desc) | 649 | static void s3c64xx_demux_eint20_27(struct irq_desc *desc) |
650 | { | 650 | { |
651 | s3c64xx_irq_demux_eint(desc, 0xff00000); | 651 | s3c64xx_irq_demux_eint(desc, 0xff00000); |
652 | } | 652 | } |
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c index 9df0c5f25824..0d24d9e4b70c 100644 --- a/drivers/pinctrl/sirf/pinctrl-atlas7.c +++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c | |||
@@ -4489,7 +4489,7 @@ static struct irq_chip atlas7_gpio_irq_chip = { | |||
4489 | .irq_set_type = atlas7_gpio_irq_type, | 4489 | .irq_set_type = atlas7_gpio_irq_type, |
4490 | }; | 4490 | }; |
4491 | 4491 | ||
4492 | static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) | 4492 | static void atlas7_gpio_handle_irq(struct irq_desc *desc) |
4493 | { | 4493 | { |
4494 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 4494 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
4495 | struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc); | 4495 | struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc); |
@@ -4512,7 +4512,7 @@ static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) | |||
4512 | if (!status) { | 4512 | if (!status) { |
4513 | pr_warn("%s: gpio [%s] status %#x no interrupt is flaged\n", | 4513 | pr_warn("%s: gpio [%s] status %#x no interrupt is flaged\n", |
4514 | __func__, gc->label, status); | 4514 | __func__, gc->label, status); |
4515 | handle_bad_irq(irq, desc); | 4515 | handle_bad_irq(desc); |
4516 | return; | 4516 | return; |
4517 | } | 4517 | } |
4518 | 4518 | ||
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c index f8bd9fb52033..2a8d69725de8 100644 --- a/drivers/pinctrl/sirf/pinctrl-sirf.c +++ b/drivers/pinctrl/sirf/pinctrl-sirf.c | |||
@@ -545,7 +545,7 @@ static struct irq_chip sirfsoc_irq_chip = { | |||
545 | .irq_set_type = sirfsoc_gpio_irq_type, | 545 | .irq_set_type = sirfsoc_gpio_irq_type, |
546 | }; | 546 | }; |
547 | 547 | ||
548 | static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) | 548 | static void sirfsoc_gpio_handle_irq(struct irq_desc *desc) |
549 | { | 549 | { |
550 | unsigned int irq = irq_desc_get_irq(desc); | 550 | unsigned int irq = irq_desc_get_irq(desc); |
551 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 551 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
@@ -570,7 +570,7 @@ static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) | |||
570 | printk(KERN_WARNING | 570 | printk(KERN_WARNING |
571 | "%s: gpio id %d status %#x no interrupt is flagged\n", | 571 | "%s: gpio id %d status %#x no interrupt is flagged\n", |
572 | __func__, bank->id, status); | 572 | __func__, bank->id, status); |
573 | handle_bad_irq(irq, desc); | 573 | handle_bad_irq(desc); |
574 | return; | 574 | return; |
575 | } | 575 | } |
576 | 576 | ||
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c index ae8f29fb5536..1f0af250dbb5 100644 --- a/drivers/pinctrl/spear/pinctrl-plgpio.c +++ b/drivers/pinctrl/spear/pinctrl-plgpio.c | |||
@@ -356,7 +356,7 @@ static struct irq_chip plgpio_irqchip = { | |||
356 | .irq_set_type = plgpio_irq_set_type, | 356 | .irq_set_type = plgpio_irq_set_type, |
357 | }; | 357 | }; |
358 | 358 | ||
359 | static void plgpio_irq_handler(unsigned irq, struct irq_desc *desc) | 359 | static void plgpio_irq_handler(struct irq_desc *desc) |
360 | { | 360 | { |
361 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 361 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); |
362 | struct plgpio *plgpio = container_of(gc, struct plgpio, chip); | 362 | struct plgpio *plgpio = container_of(gc, struct plgpio, chip); |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c index 63676617bc59..f9a3f8f446f7 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c | |||
@@ -653,7 +653,7 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = { | |||
653 | SUNXI_FUNCTION(0x0, "gpio_in"), | 653 | SUNXI_FUNCTION(0x0, "gpio_in"), |
654 | SUNXI_FUNCTION(0x1, "gpio_out"), | 654 | SUNXI_FUNCTION(0x1, "gpio_out"), |
655 | SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */ | 655 | SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */ |
656 | SUNXI_FUNCTION(0x3, "uart3"), /* PWM1 */ | 656 | SUNXI_FUNCTION(0x3, "pwm"), /* PWM1 */ |
657 | SUNXI_FUNCTION(0x5, "uart2"), /* CTS */ | 657 | SUNXI_FUNCTION(0x5, "uart2"), /* CTS */ |
658 | SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */ | 658 | SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */ |
659 | }; | 659 | }; |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index fb4669c0ce0e..38e0c7bdd2ac 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c | |||
@@ -617,13 +617,11 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type) | |||
617 | spin_lock_irqsave(&pctl->lock, flags); | 617 | spin_lock_irqsave(&pctl->lock, flags); |
618 | 618 | ||
619 | if (type & IRQ_TYPE_LEVEL_MASK) | 619 | if (type & IRQ_TYPE_LEVEL_MASK) |
620 | __irq_set_chip_handler_name_locked(d->irq, | 620 | irq_set_chip_handler_name_locked(d, &sunxi_pinctrl_level_irq_chip, |
621 | &sunxi_pinctrl_level_irq_chip, | 621 | handle_fasteoi_irq, NULL); |
622 | handle_fasteoi_irq, NULL); | ||
623 | else | 622 | else |
624 | __irq_set_chip_handler_name_locked(d->irq, | 623 | irq_set_chip_handler_name_locked(d, &sunxi_pinctrl_edge_irq_chip, |
625 | &sunxi_pinctrl_edge_irq_chip, | 624 | handle_edge_irq, NULL); |
626 | handle_edge_irq, NULL); | ||
627 | 625 | ||
628 | regval = readl(pctl->membase + reg); | 626 | regval = readl(pctl->membase + reg); |
629 | regval &= ~(IRQ_CFG_IRQ_MASK << index); | 627 | regval &= ~(IRQ_CFG_IRQ_MASK << index); |
@@ -742,7 +740,7 @@ static struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = { | |||
742 | .xlate = sunxi_pinctrl_irq_of_xlate, | 740 | .xlate = sunxi_pinctrl_irq_of_xlate, |
743 | }; | 741 | }; |
744 | 742 | ||
745 | static void sunxi_pinctrl_irq_handler(unsigned __irq, struct irq_desc *desc) | 743 | static void sunxi_pinctrl_irq_handler(struct irq_desc *desc) |
746 | { | 744 | { |
747 | unsigned int irq = irq_desc_get_irq(desc); | 745 | unsigned int irq = irq_desc_get_irq(desc); |
748 | struct irq_chip *chip = irq_desc_get_chip(desc); | 746 | struct irq_chip *chip = irq_desc_get_chip(desc); |
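Note: the __irq_set_chip_handler_name_locked(irq, ...) helper that took an irq number is replaced by irq_set_chip_handler_name_locked(), which operates on the irq_data already in hand inside the set_type callback (likewise irq_set_handler_locked() in the pistachio hunk above). A hedged sketch with placeholder chips:

#include <linux/irq.h>

static struct irq_chip my_level_chip, my_edge_chip;

static int my_irq_set_type(struct irq_data *d, unsigned int type)
{
	if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_chip_handler_name_locked(d, &my_level_chip,
						 handle_fasteoi_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &my_edge_chip,
						 handle_edge_irq, NULL);
	return 0;
}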
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c index 7e9dae54fcb2..2df8bbecebfc 100644 --- a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c +++ b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c | |||
@@ -22,49 +22,49 @@ | |||
22 | #define DRIVER_NAME "ph1-sld8-pinctrl" | 22 | #define DRIVER_NAME "ph1-sld8-pinctrl" |
23 | 23 | ||
24 | static const struct pinctrl_pin_desc ph1_sld8_pins[] = { | 24 | static const struct pinctrl_pin_desc ph1_sld8_pins[] = { |
25 | UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE, | 25 | UNIPHIER_PINCTRL_PIN(0, "PCA00", 0, |
26 | 15, UNIPHIER_PIN_DRV_4_8, | 26 | 15, UNIPHIER_PIN_DRV_4_8, |
27 | 15, UNIPHIER_PIN_PULL_DOWN), | 27 | 15, UNIPHIER_PIN_PULL_DOWN), |
28 | UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE, | 28 | UNIPHIER_PINCTRL_PIN(1, "PCA01", 0, |
29 | 16, UNIPHIER_PIN_DRV_4_8, | 29 | 16, UNIPHIER_PIN_DRV_4_8, |
30 | 16, UNIPHIER_PIN_PULL_DOWN), | 30 | 16, UNIPHIER_PIN_PULL_DOWN), |
31 | UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE, | 31 | UNIPHIER_PINCTRL_PIN(2, "PCA02", 0, |
32 | 17, UNIPHIER_PIN_DRV_4_8, | 32 | 17, UNIPHIER_PIN_DRV_4_8, |
33 | 17, UNIPHIER_PIN_PULL_DOWN), | 33 | 17, UNIPHIER_PIN_PULL_DOWN), |
34 | UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE, | 34 | UNIPHIER_PINCTRL_PIN(3, "PCA03", 0, |
35 | 18, UNIPHIER_PIN_DRV_4_8, | 35 | 18, UNIPHIER_PIN_DRV_4_8, |
36 | 18, UNIPHIER_PIN_PULL_DOWN), | 36 | 18, UNIPHIER_PIN_PULL_DOWN), |
37 | UNIPHIER_PINCTRL_PIN(4, "PCA04", UNIPHIER_PIN_IECTRL_NONE, | 37 | UNIPHIER_PINCTRL_PIN(4, "PCA04", 0, |
38 | 19, UNIPHIER_PIN_DRV_4_8, | 38 | 19, UNIPHIER_PIN_DRV_4_8, |
39 | 19, UNIPHIER_PIN_PULL_DOWN), | 39 | 19, UNIPHIER_PIN_PULL_DOWN), |
40 | UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE, | 40 | UNIPHIER_PINCTRL_PIN(5, "PCA05", 0, |
41 | 20, UNIPHIER_PIN_DRV_4_8, | 41 | 20, UNIPHIER_PIN_DRV_4_8, |
42 | 20, UNIPHIER_PIN_PULL_DOWN), | 42 | 20, UNIPHIER_PIN_PULL_DOWN), |
43 | UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE, | 43 | UNIPHIER_PINCTRL_PIN(6, "PCA06", 0, |
44 | 21, UNIPHIER_PIN_DRV_4_8, | 44 | 21, UNIPHIER_PIN_DRV_4_8, |
45 | 21, UNIPHIER_PIN_PULL_DOWN), | 45 | 21, UNIPHIER_PIN_PULL_DOWN), |
46 | UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE, | 46 | UNIPHIER_PINCTRL_PIN(7, "PCA07", 0, |
47 | 22, UNIPHIER_PIN_DRV_4_8, | 47 | 22, UNIPHIER_PIN_DRV_4_8, |
48 | 22, UNIPHIER_PIN_PULL_DOWN), | 48 | 22, UNIPHIER_PIN_PULL_DOWN), |
49 | UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE, | 49 | UNIPHIER_PINCTRL_PIN(8, "PCA08", 0, |
50 | 23, UNIPHIER_PIN_DRV_4_8, | 50 | 23, UNIPHIER_PIN_DRV_4_8, |
51 | 23, UNIPHIER_PIN_PULL_DOWN), | 51 | 23, UNIPHIER_PIN_PULL_DOWN), |
52 | UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE, | 52 | UNIPHIER_PINCTRL_PIN(9, "PCA09", 0, |
53 | 24, UNIPHIER_PIN_DRV_4_8, | 53 | 24, UNIPHIER_PIN_DRV_4_8, |
54 | 24, UNIPHIER_PIN_PULL_DOWN), | 54 | 24, UNIPHIER_PIN_PULL_DOWN), |
55 | UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE, | 55 | UNIPHIER_PINCTRL_PIN(10, "PCA10", 0, |
56 | 25, UNIPHIER_PIN_DRV_4_8, | 56 | 25, UNIPHIER_PIN_DRV_4_8, |
57 | 25, UNIPHIER_PIN_PULL_DOWN), | 57 | 25, UNIPHIER_PIN_PULL_DOWN), |
58 | UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE, | 58 | UNIPHIER_PINCTRL_PIN(11, "PCA11", 0, |
59 | 26, UNIPHIER_PIN_DRV_4_8, | 59 | 26, UNIPHIER_PIN_DRV_4_8, |
60 | 26, UNIPHIER_PIN_PULL_DOWN), | 60 | 26, UNIPHIER_PIN_PULL_DOWN), |
61 | UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE, | 61 | UNIPHIER_PINCTRL_PIN(12, "PCA12", 0, |
62 | 27, UNIPHIER_PIN_DRV_4_8, | 62 | 27, UNIPHIER_PIN_DRV_4_8, |
63 | 27, UNIPHIER_PIN_PULL_DOWN), | 63 | 27, UNIPHIER_PIN_PULL_DOWN), |
64 | UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE, | 64 | UNIPHIER_PINCTRL_PIN(13, "PCA13", 0, |
65 | 28, UNIPHIER_PIN_DRV_4_8, | 65 | 28, UNIPHIER_PIN_DRV_4_8, |
66 | 28, UNIPHIER_PIN_PULL_DOWN), | 66 | 28, UNIPHIER_PIN_PULL_DOWN), |
67 | UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE, | 67 | UNIPHIER_PINCTRL_PIN(14, "PCA14", 0, |
68 | 29, UNIPHIER_PIN_DRV_4_8, | 68 | 29, UNIPHIER_PIN_DRV_4_8, |
69 | 29, UNIPHIER_PIN_PULL_DOWN), | 69 | 29, UNIPHIER_PIN_PULL_DOWN), |
70 | UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE, | 70 | UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE, |
@@ -118,199 +118,199 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { | |||
118 | UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE, | 118 | UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE, |
119 | 36, UNIPHIER_PIN_DRV_8_12_16_20, | 119 | 36, UNIPHIER_PIN_DRV_8_12_16_20, |
120 | 128, UNIPHIER_PIN_PULL_UP), | 120 | 128, UNIPHIER_PIN_PULL_UP), |
121 | UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE, | 121 | UNIPHIER_PINCTRL_PIN(32, "SDCLK", 8, |
122 | 40, UNIPHIER_PIN_DRV_8_12_16_20, | 122 | 40, UNIPHIER_PIN_DRV_8_12_16_20, |
123 | -1, UNIPHIER_PIN_PULL_NONE), | 123 | -1, UNIPHIER_PIN_PULL_NONE), |
124 | UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE, | 124 | UNIPHIER_PINCTRL_PIN(33, "SDCMD", 8, |
125 | 44, UNIPHIER_PIN_DRV_8_12_16_20, | 125 | 44, UNIPHIER_PIN_DRV_8_12_16_20, |
126 | -1, UNIPHIER_PIN_PULL_NONE), | 126 | -1, UNIPHIER_PIN_PULL_NONE), |
127 | UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE, | 127 | UNIPHIER_PINCTRL_PIN(34, "SDDAT0", 8, |
128 | 48, UNIPHIER_PIN_DRV_8_12_16_20, | 128 | 48, UNIPHIER_PIN_DRV_8_12_16_20, |
129 | -1, UNIPHIER_PIN_PULL_NONE), | 129 | -1, UNIPHIER_PIN_PULL_NONE), |
130 | UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE, | 130 | UNIPHIER_PINCTRL_PIN(35, "SDDAT1", 8, |
131 | 52, UNIPHIER_PIN_DRV_8_12_16_20, | 131 | 52, UNIPHIER_PIN_DRV_8_12_16_20, |
132 | -1, UNIPHIER_PIN_PULL_NONE), | 132 | -1, UNIPHIER_PIN_PULL_NONE), |
133 | UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE, | 133 | UNIPHIER_PINCTRL_PIN(36, "SDDAT2", 8, |
134 | 56, UNIPHIER_PIN_DRV_8_12_16_20, | 134 | 56, UNIPHIER_PIN_DRV_8_12_16_20, |
135 | -1, UNIPHIER_PIN_PULL_NONE), | 135 | -1, UNIPHIER_PIN_PULL_NONE), |
136 | UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE, | 136 | UNIPHIER_PINCTRL_PIN(37, "SDDAT3", 8, |
137 | 60, UNIPHIER_PIN_DRV_8_12_16_20, | 137 | 60, UNIPHIER_PIN_DRV_8_12_16_20, |
138 | -1, UNIPHIER_PIN_PULL_NONE), | 138 | -1, UNIPHIER_PIN_PULL_NONE), |
139 | UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE, | 139 | UNIPHIER_PINCTRL_PIN(38, "SDCD", 8, |
140 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 140 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
141 | 129, UNIPHIER_PIN_PULL_DOWN), | 141 | 129, UNIPHIER_PIN_PULL_DOWN), |
142 | UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE, | 142 | UNIPHIER_PINCTRL_PIN(39, "SDWP", 8, |
143 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 143 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
144 | 130, UNIPHIER_PIN_PULL_DOWN), | 144 | 130, UNIPHIER_PIN_PULL_DOWN), |
145 | UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE, | 145 | UNIPHIER_PINCTRL_PIN(40, "SDVOLC", 9, |
146 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 146 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
147 | 131, UNIPHIER_PIN_PULL_DOWN), | 147 | 131, UNIPHIER_PIN_PULL_DOWN), |
148 | UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE, | 148 | UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", 0, |
149 | 37, UNIPHIER_PIN_DRV_4_8, | 149 | 37, UNIPHIER_PIN_DRV_4_8, |
150 | 37, UNIPHIER_PIN_PULL_DOWN), | 150 | 37, UNIPHIER_PIN_PULL_DOWN), |
151 | UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, | 151 | UNIPHIER_PINCTRL_PIN(42, "USB0OD", 0, |
152 | 38, UNIPHIER_PIN_DRV_4_8, | 152 | 38, UNIPHIER_PIN_DRV_4_8, |
153 | 38, UNIPHIER_PIN_PULL_DOWN), | 153 | 38, UNIPHIER_PIN_PULL_DOWN), |
154 | UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE, | 154 | UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", 0, |
155 | 39, UNIPHIER_PIN_DRV_4_8, | 155 | 39, UNIPHIER_PIN_DRV_4_8, |
156 | 39, UNIPHIER_PIN_PULL_DOWN), | 156 | 39, UNIPHIER_PIN_PULL_DOWN), |
157 | UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE, | 157 | UNIPHIER_PINCTRL_PIN(44, "USB1OD", 0, |
158 | 40, UNIPHIER_PIN_DRV_4_8, | 158 | 40, UNIPHIER_PIN_DRV_4_8, |
159 | 40, UNIPHIER_PIN_PULL_DOWN), | 159 | 40, UNIPHIER_PIN_PULL_DOWN), |
160 | UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE, | 160 | UNIPHIER_PINCTRL_PIN(45, "PCRESET", 0, |
161 | 41, UNIPHIER_PIN_DRV_4_8, | 161 | 41, UNIPHIER_PIN_DRV_4_8, |
162 | 41, UNIPHIER_PIN_PULL_DOWN), | 162 | 41, UNIPHIER_PIN_PULL_DOWN), |
163 | UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE, | 163 | UNIPHIER_PINCTRL_PIN(46, "PCREG", 0, |
164 | 42, UNIPHIER_PIN_DRV_4_8, | 164 | 42, UNIPHIER_PIN_DRV_4_8, |
165 | 42, UNIPHIER_PIN_PULL_DOWN), | 165 | 42, UNIPHIER_PIN_PULL_DOWN), |
166 | UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE, | 166 | UNIPHIER_PINCTRL_PIN(47, "PCCE2", 0, |
167 | 43, UNIPHIER_PIN_DRV_4_8, | 167 | 43, UNIPHIER_PIN_DRV_4_8, |
168 | 43, UNIPHIER_PIN_PULL_DOWN), | 168 | 43, UNIPHIER_PIN_PULL_DOWN), |
169 | UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE, | 169 | UNIPHIER_PINCTRL_PIN(48, "PCVS1", 0, |
170 | 44, UNIPHIER_PIN_DRV_4_8, | 170 | 44, UNIPHIER_PIN_DRV_4_8, |
171 | 44, UNIPHIER_PIN_PULL_DOWN), | 171 | 44, UNIPHIER_PIN_PULL_DOWN), |
172 | UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE, | 172 | UNIPHIER_PINCTRL_PIN(49, "PCCD2", 0, |
173 | 45, UNIPHIER_PIN_DRV_4_8, | 173 | 45, UNIPHIER_PIN_DRV_4_8, |
174 | 45, UNIPHIER_PIN_PULL_DOWN), | 174 | 45, UNIPHIER_PIN_PULL_DOWN), |
175 | UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE, | 175 | UNIPHIER_PINCTRL_PIN(50, "PCCD1", 0, |
176 | 46, UNIPHIER_PIN_DRV_4_8, | 176 | 46, UNIPHIER_PIN_DRV_4_8, |
177 | 46, UNIPHIER_PIN_PULL_DOWN), | 177 | 46, UNIPHIER_PIN_PULL_DOWN), |
178 | UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE, | 178 | UNIPHIER_PINCTRL_PIN(51, "PCREADY", 0, |
179 | 47, UNIPHIER_PIN_DRV_4_8, | 179 | 47, UNIPHIER_PIN_DRV_4_8, |
180 | 47, UNIPHIER_PIN_PULL_DOWN), | 180 | 47, UNIPHIER_PIN_PULL_DOWN), |
181 | UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE, | 181 | UNIPHIER_PINCTRL_PIN(52, "PCDOE", 0, |
182 | 48, UNIPHIER_PIN_DRV_4_8, | 182 | 48, UNIPHIER_PIN_DRV_4_8, |
183 | 48, UNIPHIER_PIN_PULL_DOWN), | 183 | 48, UNIPHIER_PIN_PULL_DOWN), |
184 | UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE, | 184 | UNIPHIER_PINCTRL_PIN(53, "PCCE1", 0, |
185 | 49, UNIPHIER_PIN_DRV_4_8, | 185 | 49, UNIPHIER_PIN_DRV_4_8, |
186 | 49, UNIPHIER_PIN_PULL_DOWN), | 186 | 49, UNIPHIER_PIN_PULL_DOWN), |
187 | UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE, | 187 | UNIPHIER_PINCTRL_PIN(54, "PCWE", 0, |
188 | 50, UNIPHIER_PIN_DRV_4_8, | 188 | 50, UNIPHIER_PIN_DRV_4_8, |
189 | 50, UNIPHIER_PIN_PULL_DOWN), | 189 | 50, UNIPHIER_PIN_PULL_DOWN), |
190 | UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE, | 190 | UNIPHIER_PINCTRL_PIN(55, "PCOE", 0, |
191 | 51, UNIPHIER_PIN_DRV_4_8, | 191 | 51, UNIPHIER_PIN_DRV_4_8, |
192 | 51, UNIPHIER_PIN_PULL_DOWN), | 192 | 51, UNIPHIER_PIN_PULL_DOWN), |
193 | UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE, | 193 | UNIPHIER_PINCTRL_PIN(56, "PCWAIT", 0, |
194 | 52, UNIPHIER_PIN_DRV_4_8, | 194 | 52, UNIPHIER_PIN_DRV_4_8, |
195 | 52, UNIPHIER_PIN_PULL_DOWN), | 195 | 52, UNIPHIER_PIN_PULL_DOWN), |
196 | UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE, | 196 | UNIPHIER_PINCTRL_PIN(57, "PCIOWR", 0, |
197 | 53, UNIPHIER_PIN_DRV_4_8, | 197 | 53, UNIPHIER_PIN_DRV_4_8, |
198 | 53, UNIPHIER_PIN_PULL_DOWN), | 198 | 53, UNIPHIER_PIN_PULL_DOWN), |
199 | UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE, | 199 | UNIPHIER_PINCTRL_PIN(58, "PCIORD", 0, |
200 | 54, UNIPHIER_PIN_DRV_4_8, | 200 | 54, UNIPHIER_PIN_DRV_4_8, |
201 | 54, UNIPHIER_PIN_PULL_DOWN), | 201 | 54, UNIPHIER_PIN_PULL_DOWN), |
202 | UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE, | 202 | UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", 0, |
203 | 55, UNIPHIER_PIN_DRV_4_8, | 203 | 55, UNIPHIER_PIN_DRV_4_8, |
204 | 55, UNIPHIER_PIN_PULL_DOWN), | 204 | 55, UNIPHIER_PIN_PULL_DOWN), |
205 | UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE, | 205 | UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", 0, |
206 | 56, UNIPHIER_PIN_DRV_4_8, | 206 | 56, UNIPHIER_PIN_DRV_4_8, |
207 | 56, UNIPHIER_PIN_PULL_DOWN), | 207 | 56, UNIPHIER_PIN_PULL_DOWN), |
208 | UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE, | 208 | UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", 0, |
209 | 57, UNIPHIER_PIN_DRV_4_8, | 209 | 57, UNIPHIER_PIN_DRV_4_8, |
210 | 57, UNIPHIER_PIN_PULL_DOWN), | 210 | 57, UNIPHIER_PIN_PULL_DOWN), |
211 | UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE, | 211 | UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", 0, |
212 | 58, UNIPHIER_PIN_DRV_4_8, | 212 | 58, UNIPHIER_PIN_DRV_4_8, |
213 | 58, UNIPHIER_PIN_PULL_DOWN), | 213 | 58, UNIPHIER_PIN_PULL_DOWN), |
214 | UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE, | 214 | UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", 0, |
215 | 59, UNIPHIER_PIN_DRV_4_8, | 215 | 59, UNIPHIER_PIN_DRV_4_8, |
216 | 59, UNIPHIER_PIN_PULL_DOWN), | 216 | 59, UNIPHIER_PIN_PULL_DOWN), |
217 | UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", UNIPHIER_PIN_IECTRL_NONE, | 217 | UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", 0, |
218 | 60, UNIPHIER_PIN_DRV_4_8, | 218 | 60, UNIPHIER_PIN_DRV_4_8, |
219 | 60, UNIPHIER_PIN_PULL_DOWN), | 219 | 60, UNIPHIER_PIN_PULL_DOWN), |
220 | UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE, | 220 | UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", 0, |
221 | 61, UNIPHIER_PIN_DRV_4_8, | 221 | 61, UNIPHIER_PIN_DRV_4_8, |
222 | 61, UNIPHIER_PIN_PULL_DOWN), | 222 | 61, UNIPHIER_PIN_PULL_DOWN), |
223 | UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE, | 223 | UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", 0, |
224 | 62, UNIPHIER_PIN_DRV_4_8, | 224 | 62, UNIPHIER_PIN_DRV_4_8, |
225 | 62, UNIPHIER_PIN_PULL_DOWN), | 225 | 62, UNIPHIER_PIN_PULL_DOWN), |
226 | UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE, | 226 | UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", 0, |
227 | 63, UNIPHIER_PIN_DRV_4_8, | 227 | 63, UNIPHIER_PIN_DRV_4_8, |
228 | 63, UNIPHIER_PIN_PULL_DOWN), | 228 | 63, UNIPHIER_PIN_PULL_DOWN), |
229 | UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE, | 229 | UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", 0, |
230 | 64, UNIPHIER_PIN_DRV_4_8, | 230 | 64, UNIPHIER_PIN_DRV_4_8, |
231 | 64, UNIPHIER_PIN_PULL_DOWN), | 231 | 64, UNIPHIER_PIN_PULL_DOWN), |
232 | UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE, | 232 | UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", 0, |
233 | 65, UNIPHIER_PIN_DRV_4_8, | 233 | 65, UNIPHIER_PIN_DRV_4_8, |
234 | 65, UNIPHIER_PIN_PULL_DOWN), | 234 | 65, UNIPHIER_PIN_PULL_DOWN), |
235 | UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE, | 235 | UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", 0, |
236 | 66, UNIPHIER_PIN_DRV_4_8, | 236 | 66, UNIPHIER_PIN_DRV_4_8, |
237 | 66, UNIPHIER_PIN_PULL_DOWN), | 237 | 66, UNIPHIER_PIN_PULL_DOWN), |
238 | UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE, | 238 | UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", 0, |
239 | 67, UNIPHIER_PIN_DRV_4_8, | 239 | 67, UNIPHIER_PIN_DRV_4_8, |
240 | 67, UNIPHIER_PIN_PULL_DOWN), | 240 | 67, UNIPHIER_PIN_PULL_DOWN), |
241 | UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE, | 241 | UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", 0, |
242 | 68, UNIPHIER_PIN_DRV_4_8, | 242 | 68, UNIPHIER_PIN_DRV_4_8, |
243 | 68, UNIPHIER_PIN_PULL_DOWN), | 243 | 68, UNIPHIER_PIN_PULL_DOWN), |
244 | UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE, | 244 | UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", 0, |
245 | 69, UNIPHIER_PIN_DRV_4_8, | 245 | 69, UNIPHIER_PIN_DRV_4_8, |
246 | 69, UNIPHIER_PIN_PULL_DOWN), | 246 | 69, UNIPHIER_PIN_PULL_DOWN), |
247 | UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE, | 247 | UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", 0, |
248 | 70, UNIPHIER_PIN_DRV_4_8, | 248 | 70, UNIPHIER_PIN_DRV_4_8, |
249 | 70, UNIPHIER_PIN_PULL_DOWN), | 249 | 70, UNIPHIER_PIN_PULL_DOWN), |
250 | UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE, | 250 | UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", 0, |
251 | 71, UNIPHIER_PIN_DRV_4_8, | 251 | 71, UNIPHIER_PIN_DRV_4_8, |
252 | 71, UNIPHIER_PIN_PULL_DOWN), | 252 | 71, UNIPHIER_PIN_PULL_DOWN), |
253 | UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", UNIPHIER_PIN_IECTRL_NONE, | 253 | UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", 0, |
254 | 72, UNIPHIER_PIN_DRV_4_8, | 254 | 72, UNIPHIER_PIN_DRV_4_8, |
255 | 72, UNIPHIER_PIN_PULL_DOWN), | 255 | 72, UNIPHIER_PIN_PULL_DOWN), |
256 | UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE, | 256 | UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", 0, |
257 | 73, UNIPHIER_PIN_DRV_4_8, | 257 | 73, UNIPHIER_PIN_DRV_4_8, |
258 | 73, UNIPHIER_PIN_PULL_DOWN), | 258 | 73, UNIPHIER_PIN_PULL_DOWN), |
259 | UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE, | 259 | UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", 0, |
260 | 74, UNIPHIER_PIN_DRV_4_8, | 260 | 74, UNIPHIER_PIN_DRV_4_8, |
261 | 74, UNIPHIER_PIN_PULL_DOWN), | 261 | 74, UNIPHIER_PIN_PULL_DOWN), |
262 | UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE, | 262 | UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", 0, |
263 | 75, UNIPHIER_PIN_DRV_4_8, | 263 | 75, UNIPHIER_PIN_DRV_4_8, |
264 | 75, UNIPHIER_PIN_PULL_DOWN), | 264 | 75, UNIPHIER_PIN_PULL_DOWN), |
265 | UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE, | 265 | UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", 0, |
266 | 76, UNIPHIER_PIN_DRV_4_8, | 266 | 76, UNIPHIER_PIN_DRV_4_8, |
267 | 76, UNIPHIER_PIN_PULL_DOWN), | 267 | 76, UNIPHIER_PIN_PULL_DOWN), |
268 | UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE, | 268 | UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", 0, |
269 | 77, UNIPHIER_PIN_DRV_4_8, | 269 | 77, UNIPHIER_PIN_DRV_4_8, |
270 | 77, UNIPHIER_PIN_PULL_DOWN), | 270 | 77, UNIPHIER_PIN_PULL_DOWN), |
271 | UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE, | 271 | UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", 0, |
272 | 78, UNIPHIER_PIN_DRV_4_8, | 272 | 78, UNIPHIER_PIN_DRV_4_8, |
273 | 78, UNIPHIER_PIN_PULL_DOWN), | 273 | 78, UNIPHIER_PIN_PULL_DOWN), |
274 | UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE, | 274 | UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", 0, |
275 | 79, UNIPHIER_PIN_DRV_4_8, | 275 | 79, UNIPHIER_PIN_DRV_4_8, |
276 | 79, UNIPHIER_PIN_PULL_DOWN), | 276 | 79, UNIPHIER_PIN_PULL_DOWN), |
277 | UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE, | 277 | UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", 0, |
278 | 80, UNIPHIER_PIN_DRV_4_8, | 278 | 80, UNIPHIER_PIN_DRV_4_8, |
279 | 80, UNIPHIER_PIN_PULL_DOWN), | 279 | 80, UNIPHIER_PIN_PULL_DOWN), |
280 | UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE, | 280 | UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", 0, |
281 | 81, UNIPHIER_PIN_DRV_4_8, | 281 | 81, UNIPHIER_PIN_DRV_4_8, |
282 | 81, UNIPHIER_PIN_PULL_DOWN), | 282 | 81, UNIPHIER_PIN_PULL_DOWN), |
283 | UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE, | 283 | UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", 0, |
284 | 82, UNIPHIER_PIN_DRV_4_8, | 284 | 82, UNIPHIER_PIN_DRV_4_8, |
285 | 82, UNIPHIER_PIN_PULL_DOWN), | 285 | 82, UNIPHIER_PIN_PULL_DOWN), |
286 | UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE, | 286 | UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", 0, |
287 | 83, UNIPHIER_PIN_DRV_4_8, | 287 | 83, UNIPHIER_PIN_DRV_4_8, |
288 | 83, UNIPHIER_PIN_PULL_DOWN), | 288 | 83, UNIPHIER_PIN_PULL_DOWN), |
289 | UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE, | 289 | UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", 0, |
290 | 84, UNIPHIER_PIN_DRV_4_8, | 290 | 84, UNIPHIER_PIN_DRV_4_8, |
291 | 84, UNIPHIER_PIN_PULL_DOWN), | 291 | 84, UNIPHIER_PIN_PULL_DOWN), |
292 | UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE, | 292 | UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", 0, |
293 | 85, UNIPHIER_PIN_DRV_4_8, | 293 | 85, UNIPHIER_PIN_DRV_4_8, |
294 | 85, UNIPHIER_PIN_PULL_DOWN), | 294 | 85, UNIPHIER_PIN_PULL_DOWN), |
295 | UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE, | 295 | UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", 0, |
296 | 86, UNIPHIER_PIN_DRV_4_8, | 296 | 86, UNIPHIER_PIN_DRV_4_8, |
297 | 86, UNIPHIER_PIN_PULL_DOWN), | 297 | 86, UNIPHIER_PIN_PULL_DOWN), |
298 | UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE, | 298 | UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", 0, |
299 | 87, UNIPHIER_PIN_DRV_4_8, | 299 | 87, UNIPHIER_PIN_DRV_4_8, |
300 | 87, UNIPHIER_PIN_PULL_DOWN), | 300 | 87, UNIPHIER_PIN_PULL_DOWN), |
301 | UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE, | 301 | UNIPHIER_PINCTRL_PIN(92, "AGCI", 3, |
302 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 302 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
303 | 132, UNIPHIER_PIN_PULL_DOWN), | 303 | 132, UNIPHIER_PIN_PULL_DOWN), |
304 | UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE, | 304 | UNIPHIER_PINCTRL_PIN(93, "AGCR", 4, |
305 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 305 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
306 | 133, UNIPHIER_PIN_PULL_DOWN), | 306 | 133, UNIPHIER_PIN_PULL_DOWN), |
307 | UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE, | 307 | UNIPHIER_PINCTRL_PIN(94, "AGCBS", 5, |
308 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 308 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
309 | 134, UNIPHIER_PIN_PULL_DOWN), | 309 | 134, UNIPHIER_PIN_PULL_DOWN), |
310 | UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE, | 310 | UNIPHIER_PINCTRL_PIN(95, "IECOUT", 0, |
311 | 88, UNIPHIER_PIN_DRV_4_8, | 311 | 88, UNIPHIER_PIN_DRV_4_8, |
312 | 88, UNIPHIER_PIN_PULL_DOWN), | 312 | 88, UNIPHIER_PIN_PULL_DOWN), |
313 | UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE, | 313 | UNIPHIER_PINCTRL_PIN(96, "ASMCK", 0, |
314 | 89, UNIPHIER_PIN_DRV_4_8, | 314 | 89, UNIPHIER_PIN_DRV_4_8, |
315 | 89, UNIPHIER_PIN_PULL_DOWN), | 315 | 89, UNIPHIER_PIN_PULL_DOWN), |
316 | UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE, | 316 | UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE, |
@@ -325,31 +325,31 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { | |||
325 | UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE, | 325 | UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE, |
326 | 93, UNIPHIER_PIN_DRV_4_8, | 326 | 93, UNIPHIER_PIN_DRV_4_8, |
327 | 93, UNIPHIER_PIN_PULL_UP), | 327 | 93, UNIPHIER_PIN_PULL_UP), |
328 | UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE, | 328 | UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0, |
329 | 94, UNIPHIER_PIN_DRV_4_8, | 329 | 94, UNIPHIER_PIN_DRV_4_8, |
330 | 94, UNIPHIER_PIN_PULL_DOWN), | 330 | 94, UNIPHIER_PIN_PULL_DOWN), |
331 | UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE, | 331 | UNIPHIER_PINCTRL_PIN(102, "SDA0", 10, |
332 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 332 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
333 | -1, UNIPHIER_PIN_PULL_NONE), | 333 | -1, UNIPHIER_PIN_PULL_NONE), |
334 | UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE, | 334 | UNIPHIER_PINCTRL_PIN(103, "SCL0", 10, |
335 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 335 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
336 | -1, UNIPHIER_PIN_PULL_NONE), | 336 | -1, UNIPHIER_PIN_PULL_NONE), |
337 | UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE, | 337 | UNIPHIER_PINCTRL_PIN(104, "SDA1", 11, |
338 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 338 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
339 | -1, UNIPHIER_PIN_PULL_NONE), | 339 | -1, UNIPHIER_PIN_PULL_NONE), |
340 | UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE, | 340 | UNIPHIER_PINCTRL_PIN(105, "SCL1", 11, |
341 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 341 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
342 | -1, UNIPHIER_PIN_PULL_NONE), | 342 | -1, UNIPHIER_PIN_PULL_NONE), |
343 | UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE, | 343 | UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", 12, |
344 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 344 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
345 | -1, UNIPHIER_PIN_PULL_NONE), | 345 | -1, UNIPHIER_PIN_PULL_NONE), |
346 | UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE, | 346 | UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", 12, |
347 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 347 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
348 | -1, UNIPHIER_PIN_PULL_NONE), | 348 | -1, UNIPHIER_PIN_PULL_NONE), |
349 | UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE, | 349 | UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", 13, |
350 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 350 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
351 | -1, UNIPHIER_PIN_PULL_NONE), | 351 | -1, UNIPHIER_PIN_PULL_NONE), |
352 | UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE, | 352 | UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", 13, |
353 | -1, UNIPHIER_PIN_DRV_FIXED_4, | 353 | -1, UNIPHIER_PIN_DRV_FIXED_4, |
354 | -1, UNIPHIER_PIN_PULL_NONE), | 354 | -1, UNIPHIER_PIN_PULL_NONE), |
355 | UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE, | 355 | UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE, |
@@ -358,76 +358,76 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { | |||
358 | UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE, | 358 | UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE, |
359 | 96, UNIPHIER_PIN_DRV_4_8, | 359 | 96, UNIPHIER_PIN_DRV_4_8, |
360 | 96, UNIPHIER_PIN_PULL_UP), | 360 | 96, UNIPHIER_PIN_PULL_UP), |
361 | UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE, | 361 | UNIPHIER_PINCTRL_PIN(112, "SBO1", 0, |
362 | 97, UNIPHIER_PIN_DRV_4_8, | 362 | 97, UNIPHIER_PIN_DRV_4_8, |
363 | 97, UNIPHIER_PIN_PULL_UP), | 363 | 97, UNIPHIER_PIN_PULL_UP), |
364 | UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE, | 364 | UNIPHIER_PINCTRL_PIN(113, "SBI1", 0, |
365 | 98, UNIPHIER_PIN_DRV_4_8, | 365 | 98, UNIPHIER_PIN_DRV_4_8, |
366 | 98, UNIPHIER_PIN_PULL_UP), | 366 | 98, UNIPHIER_PIN_PULL_UP), |
367 | UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE, | 367 | UNIPHIER_PINCTRL_PIN(114, "TXD1", 0, |
368 | 99, UNIPHIER_PIN_DRV_4_8, | 368 | 99, UNIPHIER_PIN_DRV_4_8, |
369 | 99, UNIPHIER_PIN_PULL_UP), | 369 | 99, UNIPHIER_PIN_PULL_UP), |
370 | UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE, | 370 | UNIPHIER_PINCTRL_PIN(115, "RXD1", 0, |
371 | 100, UNIPHIER_PIN_DRV_4_8, | 371 | 100, UNIPHIER_PIN_DRV_4_8, |
372 | 100, UNIPHIER_PIN_PULL_UP), | 372 | 100, UNIPHIER_PIN_PULL_UP), |
373 | UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE, | 373 | UNIPHIER_PINCTRL_PIN(116, "HIN", 1, |
374 | -1, UNIPHIER_PIN_DRV_FIXED_5, | 374 | -1, UNIPHIER_PIN_DRV_FIXED_5, |
375 | -1, UNIPHIER_PIN_PULL_NONE), | 375 | -1, UNIPHIER_PIN_PULL_NONE), |
376 | UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE, | 376 | UNIPHIER_PINCTRL_PIN(117, "VIN", 2, |
377 | -1, UNIPHIER_PIN_DRV_FIXED_5, | 377 | -1, UNIPHIER_PIN_DRV_FIXED_5, |
378 | -1, UNIPHIER_PIN_PULL_NONE), | 378 | -1, UNIPHIER_PIN_PULL_NONE), |
379 | UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE, | 379 | UNIPHIER_PINCTRL_PIN(118, "TCON0", 0, |
380 | 101, UNIPHIER_PIN_DRV_4_8, | 380 | 101, UNIPHIER_PIN_DRV_4_8, |
381 | 101, UNIPHIER_PIN_PULL_DOWN), | 381 | 101, UNIPHIER_PIN_PULL_DOWN), |
382 | UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE, | 382 | UNIPHIER_PINCTRL_PIN(119, "TCON1", 0, |
383 | 102, UNIPHIER_PIN_DRV_4_8, | 383 | 102, UNIPHIER_PIN_DRV_4_8, |
384 | 102, UNIPHIER_PIN_PULL_DOWN), | 384 | 102, UNIPHIER_PIN_PULL_DOWN), |
385 | UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE, | 385 | UNIPHIER_PINCTRL_PIN(120, "TCON2", 0, |
386 | 103, UNIPHIER_PIN_DRV_4_8, | 386 | 103, UNIPHIER_PIN_DRV_4_8, |
387 | 103, UNIPHIER_PIN_PULL_DOWN), | 387 | 103, UNIPHIER_PIN_PULL_DOWN), |
388 | UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE, | 388 | UNIPHIER_PINCTRL_PIN(121, "TCON3", 0, |
389 | 104, UNIPHIER_PIN_DRV_4_8, | 389 | 104, UNIPHIER_PIN_DRV_4_8, |
390 | 104, UNIPHIER_PIN_PULL_DOWN), | 390 | 104, UNIPHIER_PIN_PULL_DOWN), |
391 | UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE, | 391 | UNIPHIER_PINCTRL_PIN(122, "TCON4", 0, |
392 | 105, UNIPHIER_PIN_DRV_4_8, | 392 | 105, UNIPHIER_PIN_DRV_4_8, |
393 | 105, UNIPHIER_PIN_PULL_DOWN), | 393 | 105, UNIPHIER_PIN_PULL_DOWN), |
394 | UNIPHIER_PINCTRL_PIN(123, "TCON5", UNIPHIER_PIN_IECTRL_NONE, | 394 | UNIPHIER_PINCTRL_PIN(123, "TCON5", 0, |
395 | 106, UNIPHIER_PIN_DRV_4_8, | 395 | 106, UNIPHIER_PIN_DRV_4_8, |
396 | 106, UNIPHIER_PIN_PULL_DOWN), | 396 | 106, UNIPHIER_PIN_PULL_DOWN), |
397 | UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE, | 397 | UNIPHIER_PINCTRL_PIN(124, "TCON6", 0, |
398 | 107, UNIPHIER_PIN_DRV_4_8, | 398 | 107, UNIPHIER_PIN_DRV_4_8, |
399 | 107, UNIPHIER_PIN_PULL_DOWN), | 399 | 107, UNIPHIER_PIN_PULL_DOWN), |
400 | UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE, | 400 | UNIPHIER_PINCTRL_PIN(125, "TCON7", 0, |
401 | 108, UNIPHIER_PIN_DRV_4_8, | 401 | 108, UNIPHIER_PIN_DRV_4_8, |
402 | 108, UNIPHIER_PIN_PULL_DOWN), | 402 | 108, UNIPHIER_PIN_PULL_DOWN), |
403 | UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE, | 403 | UNIPHIER_PINCTRL_PIN(126, "TCON8", 0, |
404 | 109, UNIPHIER_PIN_DRV_4_8, | 404 | 109, UNIPHIER_PIN_DRV_4_8, |
405 | 109, UNIPHIER_PIN_PULL_DOWN), | 405 | 109, UNIPHIER_PIN_PULL_DOWN), |
406 | UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE, | 406 | UNIPHIER_PINCTRL_PIN(127, "PWMA", 0, |
407 | 110, UNIPHIER_PIN_DRV_4_8, | 407 | 110, UNIPHIER_PIN_DRV_4_8, |
408 | 110, UNIPHIER_PIN_PULL_DOWN), | 408 | 110, UNIPHIER_PIN_PULL_DOWN), |
409 | UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE, | 409 | UNIPHIER_PINCTRL_PIN(128, "XIRQ0", 0, |
410 | 111, UNIPHIER_PIN_DRV_4_8, | 410 | 111, UNIPHIER_PIN_DRV_4_8, |
411 | 111, UNIPHIER_PIN_PULL_DOWN), | 411 | 111, UNIPHIER_PIN_PULL_DOWN), |
412 | UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE, | 412 | UNIPHIER_PINCTRL_PIN(129, "XIRQ1", 0, |
413 | 112, UNIPHIER_PIN_DRV_4_8, | 413 | 112, UNIPHIER_PIN_DRV_4_8, |
414 | 112, UNIPHIER_PIN_PULL_DOWN), | 414 | 112, UNIPHIER_PIN_PULL_DOWN), |
415 | UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE, | 415 | UNIPHIER_PINCTRL_PIN(130, "XIRQ2", 0, |
416 | 113, UNIPHIER_PIN_DRV_4_8, | 416 | 113, UNIPHIER_PIN_DRV_4_8, |
417 | 113, UNIPHIER_PIN_PULL_DOWN), | 417 | 113, UNIPHIER_PIN_PULL_DOWN), |
418 | UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE, | 418 | UNIPHIER_PINCTRL_PIN(131, "XIRQ3", 0, |
419 | 114, UNIPHIER_PIN_DRV_4_8, | 419 | 114, UNIPHIER_PIN_DRV_4_8, |
420 | 114, UNIPHIER_PIN_PULL_DOWN), | 420 | 114, UNIPHIER_PIN_PULL_DOWN), |
421 | UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE, | 421 | UNIPHIER_PINCTRL_PIN(132, "XIRQ4", 0, |
422 | 115, UNIPHIER_PIN_DRV_4_8, | 422 | 115, UNIPHIER_PIN_DRV_4_8, |
423 | 115, UNIPHIER_PIN_PULL_DOWN), | 423 | 115, UNIPHIER_PIN_PULL_DOWN), |
424 | UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE, | 424 | UNIPHIER_PINCTRL_PIN(133, "XIRQ5", 0, |
425 | 116, UNIPHIER_PIN_DRV_4_8, | 425 | 116, UNIPHIER_PIN_DRV_4_8, |
426 | 116, UNIPHIER_PIN_PULL_DOWN), | 426 | 116, UNIPHIER_PIN_PULL_DOWN), |
427 | UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE, | 427 | UNIPHIER_PINCTRL_PIN(134, "XIRQ6", 0, |
428 | 117, UNIPHIER_PIN_DRV_4_8, | 428 | 117, UNIPHIER_PIN_DRV_4_8, |
429 | 117, UNIPHIER_PIN_PULL_DOWN), | 429 | 117, UNIPHIER_PIN_PULL_DOWN), |
430 | UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE, | 430 | UNIPHIER_PINCTRL_PIN(135, "XIRQ7", 0, |
431 | 118, UNIPHIER_PIN_DRV_4_8, | 431 | 118, UNIPHIER_PIN_DRV_4_8, |
432 | 118, UNIPHIER_PIN_PULL_DOWN), | 432 | 118, UNIPHIER_PIN_PULL_DOWN), |
433 | }; | 433 | }; |
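The long ph1-sld8 table above changes only the third argument of UNIPHIER_PINCTRL_PIN(): most pins move from the blanket UNIPHIER_PIN_IECTRL_NONE to a concrete input-enable control index (0, 8, 9, 10, ...). Since the generic struct pinctrl_pin_desc carries just a pin number and a name, per-pin attributes like these are stashed in its drv_data pointer. A rough illustration of that pattern with an invented attribute struct and macro; the real uniphier encoding may pack the fields differently:

    #include <linux/pinctrl/pinctrl.h>

    /* Illustrative layout only, not the driver's actual encoding. */
    struct example_pin_attrs {
            int iectrl;     /* input-enable control index, e.g. 8 for SDCLK */
            int drvctrl;    /* drive-strength control bit, -1 if fixed */
            int pupdctrl;   /* pull-up/down control bit, -1 if none */
    };

    #define EXAMPLE_PIN(num, nm, attrs) \
            { .number = (num), .name = (nm), .drv_data = (void *)(attrs) }

    static const struct example_pin_attrs sdclk_attrs = { 8, 40, -1 };

    static const struct pinctrl_pin_desc example_pins[] = {
            EXAMPLE_PIN(32, "SDCLK", &sdclk_attrs),
    };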
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index abdaed34c728..131fee2b093e 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c | |||
@@ -128,6 +128,24 @@ static const struct dmi_system_id asus_quirks[] = { | |||
128 | }, | 128 | }, |
129 | { | 129 | { |
130 | .callback = dmi_matched, | 130 | .callback = dmi_matched, |
131 | .ident = "ASUSTeK COMPUTER INC. X456UA", | ||
132 | .matches = { | ||
133 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
134 | DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"), | ||
135 | }, | ||
136 | .driver_data = &quirk_asus_wapf4, | ||
137 | }, | ||
138 | { | ||
139 | .callback = dmi_matched, | ||
140 | .ident = "ASUSTeK COMPUTER INC. X456UF", | ||
141 | .matches = { | ||
142 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
143 | DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"), | ||
144 | }, | ||
145 | .driver_data = &quirk_asus_wapf4, | ||
146 | }, | ||
147 | { | ||
148 | .callback = dmi_matched, | ||
131 | .ident = "ASUSTeK COMPUTER INC. X501U", | 149 | .ident = "ASUSTeK COMPUTER INC. X501U", |
132 | .matches = { | 150 | .matches = { |
133 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | 151 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 06697315a088..fb4dd7b3ee71 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -54,8 +54,9 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); | |||
54 | #define HPWMI_HARDWARE_QUERY 0x4 | 54 | #define HPWMI_HARDWARE_QUERY 0x4 |
55 | #define HPWMI_WIRELESS_QUERY 0x5 | 55 | #define HPWMI_WIRELESS_QUERY 0x5 |
56 | #define HPWMI_BIOS_QUERY 0x9 | 56 | #define HPWMI_BIOS_QUERY 0x9 |
57 | #define HPWMI_FEATURE_QUERY 0xb | ||
57 | #define HPWMI_HOTKEY_QUERY 0xc | 58 | #define HPWMI_HOTKEY_QUERY 0xc |
58 | #define HPWMI_FEATURE_QUERY 0xd | 59 | #define HPWMI_FEATURE2_QUERY 0xd |
59 | #define HPWMI_WIRELESS2_QUERY 0x1b | 60 | #define HPWMI_WIRELESS2_QUERY 0x1b |
60 | #define HPWMI_POSTCODEERROR_QUERY 0x2a | 61 | #define HPWMI_POSTCODEERROR_QUERY 0x2a |
61 | 62 | ||
@@ -295,25 +296,33 @@ static int hp_wmi_tablet_state(void) | |||
295 | return (state & 0x4) ? 1 : 0; | 296 | return (state & 0x4) ? 1 : 0; |
296 | } | 297 | } |
297 | 298 | ||
298 | static int __init hp_wmi_bios_2009_later(void) | 299 | static int __init hp_wmi_bios_2008_later(void) |
299 | { | 300 | { |
300 | int state = 0; | 301 | int state = 0; |
301 | int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state, | 302 | int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state, |
302 | sizeof(state), sizeof(state)); | 303 | sizeof(state), sizeof(state)); |
303 | if (ret) | 304 | if (!ret) |
304 | return ret; | 305 | return 1; |
305 | 306 | ||
306 | return (state & 0x10) ? 1 : 0; | 307 | return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO; |
307 | } | 308 | } |
308 | 309 | ||
309 | static int hp_wmi_enable_hotkeys(void) | 310 | static int __init hp_wmi_bios_2009_later(void) |
310 | { | 311 | { |
311 | int ret; | 312 | int state = 0; |
312 | int query = 0x6e; | 313 | int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state, |
314 | sizeof(state), sizeof(state)); | ||
315 | if (!ret) | ||
316 | return 1; | ||
313 | 317 | ||
314 | ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query), | 318 | return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO; |
315 | 0); | 319 | } |
316 | 320 | ||
321 | static int __init hp_wmi_enable_hotkeys(void) | ||
322 | { | ||
323 | int value = 0x6e; | ||
324 | int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value, | ||
325 | sizeof(value), 0); | ||
317 | if (ret) | 326 | if (ret) |
318 | return -EINVAL; | 327 | return -EINVAL; |
319 | return 0; | 328 | return 0; |
@@ -663,7 +672,7 @@ static int __init hp_wmi_input_setup(void) | |||
663 | hp_wmi_tablet_state()); | 672 | hp_wmi_tablet_state()); |
664 | input_sync(hp_wmi_input_dev); | 673 | input_sync(hp_wmi_input_dev); |
665 | 674 | ||
666 | if (hp_wmi_bios_2009_later() == 4) | 675 | if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later()) |
667 | hp_wmi_enable_hotkeys(); | 676 | hp_wmi_enable_hotkeys(); |
668 | 677 | ||
669 | status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL); | 678 | status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL); |
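The hp-wmi hunks split what used to be a single feature probe into two: command 0xb (HPWMI_FEATURE_QUERY) answers on 2008-and-later BIOSes, command 0xd (HPWMI_FEATURE2_QUERY) on 2009-and-later ones, and each probe now reports support simply by whether the BIOS recognises the command. The explicit hotkey enable is then sent only to the 2008-era BIOSes that actually need it. A condensed view of the new setup-time decision, using the helpers added above:

    static void example_maybe_enable_hotkeys(void)
    {
            /* FEATURE2 (0xd) understood: 2009+ BIOS, hotkeys already work.
             * Otherwise, if FEATURE (0xb) is understood, this is a 2008-era
             * BIOS that needs the explicit 0x6e enable command. */
            if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
                    hp_wmi_enable_hotkeys();
    }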
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 6740c513919c..f2372f400ddb 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c | |||
@@ -938,7 +938,7 @@ static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state) | |||
938 | else if (result == TOS_NOT_SUPPORTED) | 938 | else if (result == TOS_NOT_SUPPORTED) |
939 | return -ENODEV; | 939 | return -ENODEV; |
940 | 940 | ||
941 | return result = TOS_SUCCESS ? 0 : -EIO; | 941 | return result == TOS_SUCCESS ? 0 : -EIO; |
942 | } | 942 | } |
943 | 943 | ||
944 | static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state) | 944 | static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state) |
@@ -2398,11 +2398,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) | |||
2398 | if (error) | 2398 | if (error) |
2399 | return error; | 2399 | return error; |
2400 | 2400 | ||
2401 | error = toshiba_hotkey_event_type_get(dev, &events_type); | 2401 | if (toshiba_hotkey_event_type_get(dev, &events_type)) |
2402 | if (error) { | 2402 | pr_notice("Unable to query Hotkey Event Type\n"); |
2403 | pr_err("Unable to query Hotkey Event Type\n"); | 2403 | |
2404 | return error; | ||
2405 | } | ||
2406 | dev->hotkey_event_type = events_type; | 2404 | dev->hotkey_event_type = events_type; |
2407 | 2405 | ||
2408 | dev->hotkey_dev = input_allocate_device(); | 2406 | dev->hotkey_dev = input_allocate_device(); |
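Two small toshiba_acpi fixes: the first is an operator-precedence bug, the second stops treating a failed hotkey-event-type query as fatal. For the first, '=' binds more loosely than '?:', so the old statement performed an assignment rather than a comparison; assuming TOS_SUCCESS is 0, as it is elsewhere in this driver, it returned -EIO unconditionally:

    return result = TOS_SUCCESS ? 0 : -EIO;   /* parses as: result = (TOS_SUCCESS ? 0 : -EIO) */
    return result == TOS_SUCCESS ? 0 : -EIO;  /* intended: compare result, then pick 0 or -EIO */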
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index aac47573f9ed..eb391a281833 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
@@ -194,34 +194,6 @@ static bool wmi_parse_guid(const u8 *src, u8 *dest) | |||
194 | return true; | 194 | return true; |
195 | } | 195 | } |
196 | 196 | ||
197 | /* | ||
198 | * Convert a raw GUID to the ACII string representation | ||
199 | */ | ||
200 | static int wmi_gtoa(const char *in, char *out) | ||
201 | { | ||
202 | int i; | ||
203 | |||
204 | for (i = 3; i >= 0; i--) | ||
205 | out += sprintf(out, "%02X", in[i] & 0xFF); | ||
206 | |||
207 | out += sprintf(out, "-"); | ||
208 | out += sprintf(out, "%02X", in[5] & 0xFF); | ||
209 | out += sprintf(out, "%02X", in[4] & 0xFF); | ||
210 | out += sprintf(out, "-"); | ||
211 | out += sprintf(out, "%02X", in[7] & 0xFF); | ||
212 | out += sprintf(out, "%02X", in[6] & 0xFF); | ||
213 | out += sprintf(out, "-"); | ||
214 | out += sprintf(out, "%02X", in[8] & 0xFF); | ||
215 | out += sprintf(out, "%02X", in[9] & 0xFF); | ||
216 | out += sprintf(out, "-"); | ||
217 | |||
218 | for (i = 10; i <= 15; i++) | ||
219 | out += sprintf(out, "%02X", in[i] & 0xFF); | ||
220 | |||
221 | *out = '\0'; | ||
222 | return 0; | ||
223 | } | ||
224 | |||
225 | static bool find_guid(const char *guid_string, struct wmi_block **out) | 197 | static bool find_guid(const char *guid_string, struct wmi_block **out) |
226 | { | 198 | { |
227 | char tmp[16], guid_input[16]; | 199 | char tmp[16], guid_input[16]; |
@@ -457,11 +429,7 @@ EXPORT_SYMBOL_GPL(wmi_set_block); | |||
457 | 429 | ||
458 | static void wmi_dump_wdg(const struct guid_block *g) | 430 | static void wmi_dump_wdg(const struct guid_block *g) |
459 | { | 431 | { |
460 | char guid_string[37]; | 432 | pr_info("%pUL:\n", g->guid); |
461 | |||
462 | wmi_gtoa(g->guid, guid_string); | ||
463 | |||
464 | pr_info("%s:\n", guid_string); | ||
465 | pr_info("\tobject_id: %c%c\n", g->object_id[0], g->object_id[1]); | 433 | pr_info("\tobject_id: %c%c\n", g->object_id[0], g->object_id[1]); |
466 | pr_info("\tnotify_id: %02X\n", g->notify_id); | 434 | pr_info("\tnotify_id: %02X\n", g->notify_id); |
467 | pr_info("\treserved: %02X\n", g->reserved); | 435 | pr_info("\treserved: %02X\n", g->reserved); |
@@ -661,7 +629,6 @@ EXPORT_SYMBOL_GPL(wmi_has_guid); | |||
661 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | 629 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, |
662 | char *buf) | 630 | char *buf) |
663 | { | 631 | { |
664 | char guid_string[37]; | ||
665 | struct wmi_block *wblock; | 632 | struct wmi_block *wblock; |
666 | 633 | ||
667 | wblock = dev_get_drvdata(dev); | 634 | wblock = dev_get_drvdata(dev); |
@@ -670,9 +637,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | |||
670 | return strlen(buf); | 637 | return strlen(buf); |
671 | } | 638 | } |
672 | 639 | ||
673 | wmi_gtoa(wblock->gblock.guid, guid_string); | 640 | return sprintf(buf, "wmi:%pUL\n", wblock->gblock.guid); |
674 | |||
675 | return sprintf(buf, "wmi:%s\n", guid_string); | ||
676 | } | 641 | } |
677 | static DEVICE_ATTR_RO(modalias); | 642 | static DEVICE_ATTR_RO(modalias); |
678 | 643 | ||
@@ -695,7 +660,7 @@ static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
695 | if (!wblock) | 660 | if (!wblock) |
696 | return -ENOMEM; | 661 | return -ENOMEM; |
697 | 662 | ||
698 | wmi_gtoa(wblock->gblock.guid, guid_string); | 663 | sprintf(guid_string, "%pUL", wblock->gblock.guid); |
699 | 664 | ||
700 | strcpy(&env->buf[env->buflen - 1], "wmi:"); | 665 | strcpy(&env->buf[env->buflen - 1], "wmi:"); |
701 | memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36); | 666 | memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36); |
@@ -721,12 +686,9 @@ static struct class wmi_class = { | |||
721 | static int wmi_create_device(const struct guid_block *gblock, | 686 | static int wmi_create_device(const struct guid_block *gblock, |
722 | struct wmi_block *wblock, acpi_handle handle) | 687 | struct wmi_block *wblock, acpi_handle handle) |
723 | { | 688 | { |
724 | char guid_string[37]; | ||
725 | |||
726 | wblock->dev.class = &wmi_class; | 689 | wblock->dev.class = &wmi_class; |
727 | 690 | ||
728 | wmi_gtoa(gblock->guid, guid_string); | 691 | dev_set_name(&wblock->dev, "%pUL", gblock->guid); |
729 | dev_set_name(&wblock->dev, "%s", guid_string); | ||
730 | 692 | ||
731 | dev_set_drvdata(&wblock->dev, wblock); | 693 | dev_set_drvdata(&wblock->dev, wblock); |
732 | 694 | ||
@@ -877,7 +839,6 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event) | |||
877 | struct guid_block *block; | 839 | struct guid_block *block; |
878 | struct wmi_block *wblock; | 840 | struct wmi_block *wblock; |
879 | struct list_head *p; | 841 | struct list_head *p; |
880 | char guid_string[37]; | ||
881 | 842 | ||
882 | list_for_each(p, &wmi_block_list) { | 843 | list_for_each(p, &wmi_block_list) { |
883 | wblock = list_entry(p, struct wmi_block, list); | 844 | wblock = list_entry(p, struct wmi_block, list); |
@@ -888,8 +849,8 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event) | |||
888 | if (wblock->handler) | 849 | if (wblock->handler) |
889 | wblock->handler(event, wblock->handler_data); | 850 | wblock->handler(event, wblock->handler_data); |
890 | if (debug_event) { | 851 | if (debug_event) { |
891 | wmi_gtoa(wblock->gblock.guid, guid_string); | 852 | pr_info("DEBUG Event GUID: %pUL\n", |
892 | pr_info("DEBUG Event GUID: %s\n", guid_string); | 853 | wblock->gblock.guid); |
893 | } | 854 | } |
894 | 855 | ||
895 | acpi_bus_generate_netlink_event( | 856 | acpi_bus_generate_netlink_event( |
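All of the wmi.c hunks retire the open-coded wmi_gtoa() helper in favour of the kernel's %pU printk extension: %pUL prints a raw 16-byte GUID in the usual 8-4-4-4-12 form, upper-case, with the little-endian field order that wmi_gtoa() used to produce by hand. A minimal sketch of the specifier; the GUID bytes below are arbitrary example values:

    #include <linux/printk.h>
    #include <linux/types.h>

    /* 16 raw GUID bytes as they would sit in a _WDG block (made-up values). */
    static const u8 example_guid[16] = {
            0x78, 0x56, 0x34, 0x12, 0xbc, 0x9a, 0xf0, 0xde,
            0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
    };

    static void example_print_guid(void)
    {
            pr_info("GUID: %pUL\n", example_guid);
            /* -> GUID: 12345678-9ABC-DEF0-0123-456789ABCDEF */
    }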
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c index f4f2c1f76c32..74f2d3ff1d7c 100644 --- a/drivers/power/twl4030_charger.c +++ b/drivers/power/twl4030_charger.c | |||
@@ -91,7 +91,7 @@ | |||
91 | #define TWL4030_MSTATEC_COMPLETE1 0x0b | 91 | #define TWL4030_MSTATEC_COMPLETE1 0x0b |
92 | #define TWL4030_MSTATEC_COMPLETE4 0x0e | 92 | #define TWL4030_MSTATEC_COMPLETE4 0x0e |
93 | 93 | ||
94 | #if IS_ENABLED(CONFIG_TWL4030_MADC) | 94 | #if IS_REACHABLE(CONFIG_TWL4030_MADC) |
95 | /* | 95 | /* |
96 | * If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11) | 96 | * If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11) |
97 | * then AC is available. | 97 | * then AC is available. |
@@ -1057,13 +1057,9 @@ static int twl4030_bci_probe(struct platform_device *pdev) | |||
1057 | 1057 | ||
1058 | phynode = of_find_compatible_node(bci->dev->of_node->parent, | 1058 | phynode = of_find_compatible_node(bci->dev->of_node->parent, |
1059 | NULL, "ti,twl4030-usb"); | 1059 | NULL, "ti,twl4030-usb"); |
1060 | if (phynode) { | 1060 | if (phynode) |
1061 | bci->transceiver = devm_usb_get_phy_by_node( | 1061 | bci->transceiver = devm_usb_get_phy_by_node( |
1062 | bci->dev, phynode, &bci->usb_nb); | 1062 | bci->dev, phynode, &bci->usb_nb); |
1063 | if (IS_ERR(bci->transceiver) && | ||
1064 | PTR_ERR(bci->transceiver) == -EPROBE_DEFER) | ||
1065 | return -EPROBE_DEFER; | ||
1066 | } | ||
1067 | } | 1063 | } |
1068 | 1064 | ||
1069 | /* Enable interrupts now. */ | 1065 | /* Enable interrupts now. */ |
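The first twl4030_charger hunk swaps IS_ENABLED() for IS_REACHABLE(). IS_ENABLED(CONFIG_FOO) is true whenever FOO is =y or =m, but a built-in caller cannot link against symbols that live in a module; IS_REACHABLE(CONFIG_FOO) is only true when the calling code can actually reach them. A sketch of the distinction, using the same config symbol as the hunk:

    /* Rough semantics (see include/linux/kconfig.h):
     *   IS_ENABLED(CONFIG_TWL4030_MADC)   - MADC is =y or =m
     *   IS_REACHABLE(CONFIG_TWL4030_MADC) - MADC is =y, or MADC is =m and
     *                                       this file is built as a module too
     * so a built-in charger never tries to call into a modular MADC driver. */
    #if IS_REACHABLE(CONFIG_TWL4030_MADC)
            /* safe to call the twl4030-madc helpers for AC detection */
    #else
            /* fall back to a simpler AC-presence heuristic */
    #endif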
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 738adfa5332b..52ea605f8130 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c | |||
@@ -318,6 +318,7 @@ static const struct of_device_id of_anatop_regulator_match_tbl[] = { | |||
318 | { .compatible = "fsl,anatop-regulator", }, | 318 | { .compatible = "fsl,anatop-regulator", }, |
319 | { /* end */ } | 319 | { /* end */ } |
320 | }; | 320 | }; |
321 | MODULE_DEVICE_TABLE(of, of_anatop_regulator_match_tbl); | ||
321 | 322 | ||
322 | static struct platform_driver anatop_regulator_driver = { | 323 | static struct platform_driver anatop_regulator_driver = { |
323 | .driver = { | 324 | .driver = { |
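The one-line MODULE_DEVICE_TABLE addition here (repeated below for gpio-regulator and the vexpress regulator) exports the OF match table into the module's alias list, so udev/modprobe can autoload the driver when a matching compatible string appears in the device tree. The general pattern, with a placeholder compatible:

    #include <linux/module.h>
    #include <linux/of.h>

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example-device" },  /* placeholder string */
            { /* sentinel */ }
    };
    /* Emits "of:...Cvendor,example-device" aliases into modinfo, which is
     * what lets userspace load the module automatically. */
    MODULE_DEVICE_TABLE(of, example_of_match);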
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 01bf3476a791..a9567af7cec0 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c | |||
@@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = { | |||
192 | AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20, | 192 | AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20, |
193 | AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), | 193 | AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), |
194 | AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20, | 194 | AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20, |
195 | AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), | 195 | AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)), |
196 | AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50, | 196 | AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50, |
197 | AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)), | 197 | AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)), |
198 | /* secondary switchable output of DCDC1 */ | 198 | /* secondary switchable output of DCDC1 */ |
199 | AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100, | 199 | AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100, |
200 | AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)), | 200 | AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)), |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7a85ac9e32c5..8a34f6acc801 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -1394,15 +1394,19 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) | |||
1394 | return 0; | 1394 | return 0; |
1395 | 1395 | ||
1396 | r = regulator_dev_lookup(dev, rdev->supply_name, &ret); | 1396 | r = regulator_dev_lookup(dev, rdev->supply_name, &ret); |
1397 | if (ret == -ENODEV) { | ||
1398 | /* | ||
1399 | * No supply was specified for this regulator and | ||
1400 | * there will never be one. | ||
1401 | */ | ||
1402 | return 0; | ||
1403 | } | ||
1404 | |||
1405 | if (!r) { | 1397 | if (!r) { |
1398 | if (ret == -ENODEV) { | ||
1399 | /* | ||
1400 | * No supply was specified for this regulator and | ||
1401 | * there will never be one. | ||
1402 | */ | ||
1403 | return 0; | ||
1404 | } | ||
1405 | |||
1406 | /* Did the lookup explicitly defer for us? */ | ||
1407 | if (ret == -EPROBE_DEFER) | ||
1408 | return ret; | ||
1409 | |||
1406 | if (have_full_constraints()) { | 1410 | if (have_full_constraints()) { |
1407 | r = dummy_regulator_rdev; | 1411 | r = dummy_regulator_rdev; |
1408 | } else { | 1412 | } else { |
@@ -1422,11 +1426,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) | |||
1422 | return ret; | 1426 | return ret; |
1423 | 1427 | ||
1424 | /* Cascade always-on state to supply */ | 1428 | /* Cascade always-on state to supply */ |
1425 | if (_regulator_is_enabled(rdev)) { | 1429 | if (_regulator_is_enabled(rdev) && rdev->supply) { |
1426 | ret = regulator_enable(rdev->supply); | 1430 | ret = regulator_enable(rdev->supply); |
1427 | if (ret < 0) { | 1431 | if (ret < 0) { |
1428 | if (rdev->supply) | 1432 | _regulator_put(rdev->supply); |
1429 | _regulator_put(rdev->supply); | ||
1430 | return ret; | 1433 | return ret; |
1431 | } | 1434 | } |
1432 | } | 1435 | } |
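The core.c hunk reorders regulator_resolve_supply() so that the lookup status is only interpreted when no regulator was actually found: -ENODEV still means "no supply, ever", a -EPROBE_DEFER from the lookup is now propagated instead of being papered over with the dummy regulator, and the always-on cascade only runs when a supply really got attached. A condensed view of the resulting flow, using the names from the hunk and omitting everything else:

    r = regulator_dev_lookup(dev, rdev->supply_name, &ret);
    if (!r) {
            if (ret == -ENODEV)
                    return 0;               /* no supply specified, none expected */
            if (ret == -EPROBE_DEFER)
                    return ret;             /* supply exists but is not ready yet */
            if (have_full_constraints())
                    r = dummy_regulator_rdev;
            /* else: error path, not shown in the hunk */
    }

    /* ... attach rdev->supply ... */

    if (_regulator_is_enabled(rdev) && rdev->supply)
            ret = regulator_enable(rdev->supply);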
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 464018de7e97..7bba8b747f30 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c | |||
@@ -394,6 +394,7 @@ static const struct of_device_id regulator_gpio_of_match[] = { | |||
394 | { .compatible = "regulator-gpio", }, | 394 | { .compatible = "regulator-gpio", }, |
395 | {}, | 395 | {}, |
396 | }; | 396 | }; |
397 | MODULE_DEVICE_TABLE(of, regulator_gpio_of_match); | ||
397 | #endif | 398 | #endif |
398 | 399 | ||
399 | static struct platform_driver gpio_regulator_driver = { | 400 | static struct platform_driver gpio_regulator_driver = { |
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c index 4fa7bcaf454e..f9d74d63be7c 100644 --- a/drivers/regulator/pbias-regulator.c +++ b/drivers/regulator/pbias-regulator.c | |||
@@ -45,6 +45,10 @@ struct pbias_regulator_data { | |||
45 | int voltage; | 45 | int voltage; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | struct pbias_of_data { | ||
49 | unsigned int offset; | ||
50 | }; | ||
51 | |||
48 | static const unsigned int pbias_volt_table[] = { | 52 | static const unsigned int pbias_volt_table[] = { |
49 | 1800000, | 53 | 1800000, |
50 | 3000000 | 54 | 3000000 |
@@ -102,8 +106,35 @@ static struct of_regulator_match pbias_matches[] = { | |||
102 | }; | 106 | }; |
103 | #define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches) | 107 | #define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches) |
104 | 108 | ||
109 | /* Offset from SCM general area (and syscon) base */ | ||
110 | |||
111 | static const struct pbias_of_data pbias_of_data_omap2 = { | ||
112 | .offset = 0x230, | ||
113 | }; | ||
114 | |||
115 | static const struct pbias_of_data pbias_of_data_omap3 = { | ||
116 | .offset = 0x2b0, | ||
117 | }; | ||
118 | |||
119 | static const struct pbias_of_data pbias_of_data_omap4 = { | ||
120 | .offset = 0x60, | ||
121 | }; | ||
122 | |||
123 | static const struct pbias_of_data pbias_of_data_omap5 = { | ||
124 | .offset = 0x60, | ||
125 | }; | ||
126 | |||
127 | static const struct pbias_of_data pbias_of_data_dra7 = { | ||
128 | .offset = 0xe00, | ||
129 | }; | ||
130 | |||
105 | static const struct of_device_id pbias_of_match[] = { | 131 | static const struct of_device_id pbias_of_match[] = { |
106 | { .compatible = "ti,pbias-omap", }, | 132 | { .compatible = "ti,pbias-omap", }, |
133 | { .compatible = "ti,pbias-omap2", .data = &pbias_of_data_omap2, }, | ||
134 | { .compatible = "ti,pbias-omap3", .data = &pbias_of_data_omap3, }, | ||
135 | { .compatible = "ti,pbias-omap4", .data = &pbias_of_data_omap4, }, | ||
136 | { .compatible = "ti,pbias-omap5", .data = &pbias_of_data_omap5, }, | ||
137 | { .compatible = "ti,pbias-dra7", .data = &pbias_of_data_dra7, }, | ||
107 | {}, | 138 | {}, |
108 | }; | 139 | }; |
109 | MODULE_DEVICE_TABLE(of, pbias_of_match); | 140 | MODULE_DEVICE_TABLE(of, pbias_of_match); |
@@ -118,6 +149,9 @@ static int pbias_regulator_probe(struct platform_device *pdev) | |||
118 | const struct pbias_reg_info *info; | 149 | const struct pbias_reg_info *info; |
119 | int ret = 0; | 150 | int ret = 0; |
120 | int count, idx, data_idx = 0; | 151 | int count, idx, data_idx = 0; |
152 | const struct of_device_id *match; | ||
153 | const struct pbias_of_data *data; | ||
154 | unsigned int offset; | ||
121 | 155 | ||
122 | count = of_regulator_match(&pdev->dev, np, pbias_matches, | 156 | count = of_regulator_match(&pdev->dev, np, pbias_matches, |
123 | PBIAS_NUM_REGS); | 157 | PBIAS_NUM_REGS); |
@@ -133,6 +167,20 @@ static int pbias_regulator_probe(struct platform_device *pdev) | |||
133 | if (IS_ERR(syscon)) | 167 | if (IS_ERR(syscon)) |
134 | return PTR_ERR(syscon); | 168 | return PTR_ERR(syscon); |
135 | 169 | ||
170 | match = of_match_device(of_match_ptr(pbias_of_match), &pdev->dev); | ||
171 | if (match && match->data) { | ||
172 | data = match->data; | ||
173 | offset = data->offset; | ||
174 | } else { | ||
175 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
176 | if (!res) | ||
177 | return -EINVAL; | ||
178 | |||
179 | offset = res->start; | ||
180 | dev_WARN(&pdev->dev, | ||
181 | "using legacy dt data for pbias offset\n"); | ||
182 | } | ||
183 | |||
136 | cfg.regmap = syscon; | 184 | cfg.regmap = syscon; |
137 | cfg.dev = &pdev->dev; | 185 | cfg.dev = &pdev->dev; |
138 | 186 | ||
@@ -145,10 +193,6 @@ static int pbias_regulator_probe(struct platform_device *pdev) | |||
145 | if (!info) | 193 | if (!info) |
146 | return -ENODEV; | 194 | return -ENODEV; |
147 | 195 | ||
148 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
149 | if (!res) | ||
150 | return -EINVAL; | ||
151 | |||
152 | drvdata[data_idx].syscon = syscon; | 196 | drvdata[data_idx].syscon = syscon; |
153 | drvdata[data_idx].info = info; | 197 | drvdata[data_idx].info = info; |
154 | drvdata[data_idx].desc.name = info->name; | 198 | drvdata[data_idx].desc.name = info->name; |
@@ -158,9 +202,9 @@ static int pbias_regulator_probe(struct platform_device *pdev) | |||
158 | drvdata[data_idx].desc.volt_table = pbias_volt_table; | 202 | drvdata[data_idx].desc.volt_table = pbias_volt_table; |
159 | drvdata[data_idx].desc.n_voltages = 2; | 203 | drvdata[data_idx].desc.n_voltages = 2; |
160 | drvdata[data_idx].desc.enable_time = info->enable_time; | 204 | drvdata[data_idx].desc.enable_time = info->enable_time; |
161 | drvdata[data_idx].desc.vsel_reg = res->start; | 205 | drvdata[data_idx].desc.vsel_reg = offset; |
162 | drvdata[data_idx].desc.vsel_mask = info->vmode; | 206 | drvdata[data_idx].desc.vsel_mask = info->vmode; |
163 | drvdata[data_idx].desc.enable_reg = res->start; | 207 | drvdata[data_idx].desc.enable_reg = offset; |
164 | drvdata[data_idx].desc.enable_mask = info->enable_mask; | 208 | drvdata[data_idx].desc.enable_mask = info->enable_mask; |
165 | drvdata[data_idx].desc.enable_val = info->enable; | 209 | drvdata[data_idx].desc.enable_val = info->enable; |
166 | drvdata[data_idx].desc.disable_val = info->disable_val; | 210 | drvdata[data_idx].desc.disable_val = info->disable_val; |
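The pbias-regulator changes move the register offset from a separate DT memory resource to per-compatible data hung off the OF match table, keeping the old resource as a warned-about legacy fallback. The lookup idiom, an of_device_id entry carrying a .data pointer that probe() retrieves through of_match_device(), in minimal form; all names below are placeholders:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct example_of_data {
            unsigned int offset;
    };

    static const struct example_of_data example_data_v2 = { .offset = 0x230 };

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example-v2", .data = &example_data_v2 },
            { /* sentinel */ }
    };

    static int example_probe(struct platform_device *pdev)
    {
            const struct of_device_id *match;
            unsigned int offset;

            match = of_match_device(example_of_match, &pdev->dev);
            if (match && match->data) {
                    const struct example_of_data *data = match->data;

                    offset = data->offset;
            } else {
                    /* legacy binding: fall back to the reg property */
                    struct resource *res =
                            platform_get_resource(pdev, IORESOURCE_MEM, 0);

                    if (!res)
                            return -EINVAL;
                    offset = res->start;
            }

            /* offset then feeds vsel_reg / enable_reg in the regulator desc */
            return 0;
    }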
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c index 7f97223f95c5..a02c1b961039 100644 --- a/drivers/regulator/tps65218-regulator.c +++ b/drivers/regulator/tps65218-regulator.c | |||
@@ -73,7 +73,7 @@ static const struct regulator_linear_range dcdc4_ranges[] = { | |||
73 | }; | 73 | }; |
74 | 74 | ||
75 | static struct tps_info tps65218_pmic_regs[] = { | 75 | static struct tps_info tps65218_pmic_regs[] = { |
76 | TPS65218_INFO(DCDC1, "DCDC1", 850000, 167500), | 76 | TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000), |
77 | TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000), | 77 | TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000), |
78 | TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000), | 78 | TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000), |
79 | TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000), | 79 | TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000), |
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c index bed9d3ee4198..c810cbbd463f 100644 --- a/drivers/regulator/vexpress.c +++ b/drivers/regulator/vexpress.c | |||
@@ -103,6 +103,7 @@ static const struct of_device_id vexpress_regulator_of_match[] = { | |||
103 | { .compatible = "arm,vexpress-volt", }, | 103 | { .compatible = "arm,vexpress-volt", }, |
104 | { } | 104 | { } |
105 | }; | 105 | }; |
106 | MODULE_DEVICE_TABLE(of, vexpress_regulator_of_match); | ||
106 | 107 | ||
107 | static struct platform_driver vexpress_regulator_driver = { | 108 | static struct platform_driver vexpress_regulator_driver = { |
108 | .probe = vexpress_regulator_probe, | 109 | .probe = vexpress_regulator_probe, |
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index f8d8fdb26b72..e9fae30fafda 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c | |||
@@ -400,12 +400,16 @@ static bool virtio_ccw_kvm_notify(struct virtqueue *vq) | |||
400 | static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, | 400 | static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, |
401 | struct ccw1 *ccw, int index) | 401 | struct ccw1 *ccw, int index) |
402 | { | 402 | { |
403 | int ret; | ||
404 | |||
403 | vcdev->config_block->index = index; | 405 | vcdev->config_block->index = index; |
404 | ccw->cmd_code = CCW_CMD_READ_VQ_CONF; | 406 | ccw->cmd_code = CCW_CMD_READ_VQ_CONF; |
405 | ccw->flags = 0; | 407 | ccw->flags = 0; |
406 | ccw->count = sizeof(struct vq_config_block); | 408 | ccw->count = sizeof(struct vq_config_block); |
407 | ccw->cda = (__u32)(unsigned long)(vcdev->config_block); | 409 | ccw->cda = (__u32)(unsigned long)(vcdev->config_block); |
408 | ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); | 410 | ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); |
411 | if (ret) | ||
412 | return ret; | ||
409 | return vcdev->config_block->num; | 413 | return vcdev->config_block->num; |
410 | } | 414 | } |
411 | 415 | ||
@@ -503,6 +507,10 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, | |||
503 | goto out_err; | 507 | goto out_err; |
504 | } | 508 | } |
505 | info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i); | 509 | info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i); |
510 | if (info->num < 0) { | ||
511 | err = info->num; | ||
512 | goto out_err; | ||
513 | } | ||
506 | size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); | 514 | size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); |
507 | info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); | 515 | info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); |
508 | if (info->queue == NULL) { | 516 | if (info->queue == NULL) { |
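The virtio_ccw hunks make virtio_ccw_read_vq_conf() propagate the ccw_io_helper() result instead of ignoring it, so the caller can tell a failed channel I/O from a valid queue size before it allocates the ring. The shape of the fix, reduced to placeholders (the example_* names are not from the driver):

    /* Helper now returns a negative errno, or the queue size on success. */
    static int example_read_vq_conf(struct example_dev *dev, int index)
    {
            int ret;

            dev->config_block->index = index;
            ret = example_do_channel_io(dev);       /* previously ignored */
            if (ret)
                    return ret;
            return dev->config_block->num;
    }

    static int example_setup_vq(struct example_dev *dev, int index)
    {
            int num = example_read_vq_conf(dev, index);

            if (num < 0)
                    return num;     /* channel I/O failed; don't size the ring */
            /* ... allocate a vring with "num" entries ... */
            return 0;
    }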
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index add419d6ff34..a56a7b243e91 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -212,6 +212,17 @@ static const struct file_operations twa_fops = { | |||
212 | .llseek = noop_llseek, | 212 | .llseek = noop_llseek, |
213 | }; | 213 | }; |
214 | 214 | ||
215 | /* | ||
216 | * The controllers use an inline buffer instead of a mapped SGL for small, | ||
217 | * single entry buffers. Note that we treat a zero-length transfer like | ||
218 | * a mapped SGL. | ||
219 | */ | ||
220 | static bool twa_command_mapped(struct scsi_cmnd *cmd) | ||
221 | { | ||
222 | return scsi_sg_count(cmd) != 1 || | ||
223 | scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH; | ||
224 | } | ||
225 | |||
215 | /* This function will complete an aen request from the isr */ | 226 | /* This function will complete an aen request from the isr */ |
216 | static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) | 227 | static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) |
217 | { | 228 | { |
@@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) | |||
1339 | } | 1350 | } |
1340 | 1351 | ||
1341 | /* Now complete the io */ | 1352 | /* Now complete the io */ |
1342 | scsi_dma_unmap(cmd); | 1353 | if (twa_command_mapped(cmd)) |
1354 | scsi_dma_unmap(cmd); | ||
1343 | cmd->scsi_done(cmd); | 1355 | cmd->scsi_done(cmd); |
1344 | tw_dev->state[request_id] = TW_S_COMPLETED; | 1356 | tw_dev->state[request_id] = TW_S_COMPLETED; |
1345 | twa_free_request_id(tw_dev, request_id); | 1357 | twa_free_request_id(tw_dev, request_id); |
@@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) | |||
1582 | struct scsi_cmnd *cmd = tw_dev->srb[i]; | 1594 | struct scsi_cmnd *cmd = tw_dev->srb[i]; |
1583 | 1595 | ||
1584 | cmd->result = (DID_RESET << 16); | 1596 | cmd->result = (DID_RESET << 16); |
1585 | scsi_dma_unmap(cmd); | 1597 | if (twa_command_mapped(cmd)) |
1598 | scsi_dma_unmap(cmd); | ||
1586 | cmd->scsi_done(cmd); | 1599 | cmd->scsi_done(cmd); |
1587 | } | 1600 | } |
1588 | } | 1601 | } |
@@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ | |||
1765 | retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); | 1778 | retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); |
1766 | switch (retval) { | 1779 | switch (retval) { |
1767 | case SCSI_MLQUEUE_HOST_BUSY: | 1780 | case SCSI_MLQUEUE_HOST_BUSY: |
1768 | scsi_dma_unmap(SCpnt); | 1781 | if (twa_command_mapped(SCpnt)) |
1782 | scsi_dma_unmap(SCpnt); | ||
1769 | twa_free_request_id(tw_dev, request_id); | 1783 | twa_free_request_id(tw_dev, request_id); |
1770 | break; | 1784 | break; |
1771 | case 1: | 1785 | case 1: |
1772 | SCpnt->result = (DID_ERROR << 16); | 1786 | SCpnt->result = (DID_ERROR << 16); |
1773 | scsi_dma_unmap(SCpnt); | 1787 | if (twa_command_mapped(SCpnt)) |
1788 | scsi_dma_unmap(SCpnt); | ||
1774 | done(SCpnt); | 1789 | done(SCpnt); |
1775 | tw_dev->state[request_id] = TW_S_COMPLETED; | 1790 | tw_dev->state[request_id] = TW_S_COMPLETED; |
1776 | twa_free_request_id(tw_dev, request_id); | 1791 | twa_free_request_id(tw_dev, request_id); |
@@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, | |||
1831 | /* Map sglist from scsi layer to cmd packet */ | 1846 | /* Map sglist from scsi layer to cmd packet */ |
1832 | 1847 | ||
1833 | if (scsi_sg_count(srb)) { | 1848 | if (scsi_sg_count(srb)) { |
1834 | if ((scsi_sg_count(srb) == 1) && | 1849 | if (!twa_command_mapped(srb)) { |
1835 | (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) { | ||
1836 | if (srb->sc_data_direction == DMA_TO_DEVICE || | 1850 | if (srb->sc_data_direction == DMA_TO_DEVICE || |
1837 | srb->sc_data_direction == DMA_BIDIRECTIONAL) | 1851 | srb->sc_data_direction == DMA_BIDIRECTIONAL) |
1838 | scsi_sg_copy_to_buffer(srb, | 1852 | scsi_sg_copy_to_buffer(srb, |
@@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re | |||
1905 | { | 1919 | { |
1906 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; | 1920 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; |
1907 | 1921 | ||
1908 | if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH && | 1922 | if (!twa_command_mapped(cmd) && |
1909 | (cmd->sc_data_direction == DMA_FROM_DEVICE || | 1923 | (cmd->sc_data_direction == DMA_FROM_DEVICE || |
1910 | cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { | 1924 | cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { |
1911 | if (scsi_sg_count(cmd) == 1) { | 1925 | if (scsi_sg_count(cmd) == 1) { |
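
The 3w-9xxx change centralizes the inline-buffer test in twa_command_mapped() and only calls scsi_dma_unmap() for commands that were really mapped. A standalone sketch of the same predicate; the struct layout and the TW_MIN_SGL_LENGTH value below are placeholders, not the driver's definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define TW_MIN_SGL_LENGTH 256    /* assumed value, for illustration only */

    struct fake_cmd {
        int sg_count;             /* stand-in for scsi_sg_count() */
        unsigned int bufflen;     /* stand-in for scsi_bufflen() */
    };

    /* Single-entry transfers shorter than the minimum SGL length use the
     * controller's inline buffer; everything else is a mapped SGL. */
    static bool command_mapped(const struct fake_cmd *cmd)
    {
        return cmd->sg_count != 1 || cmd->bufflen >= TW_MIN_SGL_LENGTH;
    }

    int main(void)
    {
        struct fake_cmd small = { .sg_count = 1, .bufflen = 64 };
        struct fake_cmd large = { .sg_count = 4, .bufflen = 4096 };

        /* Only a mapped command needs (and gets) the DMA unmap. */
        printf("small mapped: %d\n", command_mapped(&small));   /* 0 */
        printf("large mapped: %d\n", command_mapped(&large));   /* 1 */
        return 0;
    }
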
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 33c74d3436c9..6bffd91b973a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -976,13 +976,13 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
976 | wake_up(&conn->ehwait); | 976 | wake_up(&conn->ehwait); |
977 | } | 977 | } |
978 | 978 | ||
979 | static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) | 979 | static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) |
980 | { | 980 | { |
981 | struct iscsi_nopout hdr; | 981 | struct iscsi_nopout hdr; |
982 | struct iscsi_task *task; | 982 | struct iscsi_task *task; |
983 | 983 | ||
984 | if (!rhdr && conn->ping_task) | 984 | if (!rhdr && conn->ping_task) |
985 | return; | 985 | return -EINVAL; |
986 | 986 | ||
987 | memset(&hdr, 0, sizeof(struct iscsi_nopout)); | 987 | memset(&hdr, 0, sizeof(struct iscsi_nopout)); |
988 | hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; | 988 | hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; |
@@ -996,13 +996,16 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) | |||
996 | hdr.ttt = RESERVED_ITT; | 996 | hdr.ttt = RESERVED_ITT; |
997 | 997 | ||
998 | task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); | 998 | task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); |
999 | if (!task) | 999 | if (!task) { |
1000 | iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); | 1000 | iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); |
1001 | else if (!rhdr) { | 1001 | return -EIO; |
1002 | } else if (!rhdr) { | ||
1002 | /* only track our nops */ | 1003 | /* only track our nops */ |
1003 | conn->ping_task = task; | 1004 | conn->ping_task = task; |
1004 | conn->last_ping = jiffies; | 1005 | conn->last_ping = jiffies; |
1005 | } | 1006 | } |
1007 | |||
1008 | return 0; | ||
1006 | } | 1009 | } |
1007 | 1010 | ||
1008 | static int iscsi_nop_out_rsp(struct iscsi_task *task, | 1011 | static int iscsi_nop_out_rsp(struct iscsi_task *task, |
@@ -2092,8 +2095,10 @@ static void iscsi_check_transport_timeouts(unsigned long data) | |||
2092 | if (time_before_eq(last_recv + recv_timeout, jiffies)) { | 2095 | if (time_before_eq(last_recv + recv_timeout, jiffies)) { |
2093 | /* send a ping to try to provoke some traffic */ | 2096 | /* send a ping to try to provoke some traffic */ |
2094 | ISCSI_DBG_CONN(conn, "Sending nopout as ping\n"); | 2097 | ISCSI_DBG_CONN(conn, "Sending nopout as ping\n"); |
2095 | iscsi_send_nopout(conn, NULL); | 2098 | if (iscsi_send_nopout(conn, NULL)) |
2096 | next_timeout = conn->last_ping + (conn->ping_timeout * HZ); | 2099 | next_timeout = jiffies + (1 * HZ); |
2100 | else | ||
2101 | next_timeout = conn->last_ping + (conn->ping_timeout * HZ); | ||
2097 | } else | 2102 | } else |
2098 | next_timeout = last_recv + recv_timeout; | 2103 | next_timeout = last_recv + recv_timeout; |
2099 | 2104 | ||
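
The libiscsi change has iscsi_send_nopout() report failure so the transport-timeout handler can re-arm its timer one second later instead of waiting a full ping_timeout for a reply to a ping that was never sent. A toy version of that rescheduling decision, using plain integers in place of jiffies and an assumed HZ:

    #include <stdio.h>

    #define HZ 100    /* illustrative tick rate */

    /* Returns the tick at which the connection should be checked again. */
    static long next_check(long now, long last_ping, long ping_timeout,
                           int send_failed)
    {
        if (send_failed)
            return now + 1 * HZ;                /* retry the ping soon */
        return last_ping + ping_timeout * HZ;   /* wait for the nop-in reply */
    }

    int main(void)
    {
        printf("%ld\n", next_check(1000, 1000, 30, 1));   /* 1100 */
        printf("%ld\n", next_check(1000, 1000, 30, 0));   /* 4000 */
        return 0;
    }
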
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index edb044a7b56d..0a2168e69bbc 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c | |||
@@ -111,7 +111,7 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name) | |||
111 | 111 | ||
112 | dh = __scsi_dh_lookup(name); | 112 | dh = __scsi_dh_lookup(name); |
113 | if (!dh) { | 113 | if (!dh) { |
114 | request_module(name); | 114 | request_module("scsi_dh_%s", name); |
115 | dh = __scsi_dh_lookup(name); | 115 | dh = __scsi_dh_lookup(name); |
116 | } | 116 | } |
117 | 117 | ||
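
The scsi_dh fix requests the module by its full "scsi_dh_<name>" name, which is how the device-handler modules are actually named, rather than by the bare handler name. A userspace sketch of the same name construction (build_module_name() is an invented helper):

    #include <stdio.h>

    static void build_module_name(char *buf, size_t len, const char *name)
    {
        /* mirrors request_module("scsi_dh_%s", name) in the hunk above */
        snprintf(buf, len, "scsi_dh_%s", name);
    }

    int main(void)
    {
        char buf[64];

        build_module_name(buf, sizeof(buf), "alua");
        printf("%s\n", buf);    /* scsi_dh_alua */
        return 0;
    }
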
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index cbfc5990052b..126a48c6431e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req) | |||
1957 | static void scsi_mq_done(struct scsi_cmnd *cmd) | 1957 | static void scsi_mq_done(struct scsi_cmnd *cmd) |
1958 | { | 1958 | { |
1959 | trace_scsi_dispatch_cmd_done(cmd); | 1959 | trace_scsi_dispatch_cmd_done(cmd); |
1960 | blk_mq_complete_request(cmd->request); | 1960 | blk_mq_complete_request(cmd->request, cmd->request->errors); |
1961 | } | 1961 | } |
1962 | 1962 | ||
1963 | static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, | 1963 | static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, |
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 043419dcee92..8e72bcbd3d6d 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c | |||
@@ -65,7 +65,7 @@ void intc_set_prio_level(unsigned int irq, unsigned int level) | |||
65 | raw_spin_unlock_irqrestore(&intc_big_lock, flags); | 65 | raw_spin_unlock_irqrestore(&intc_big_lock, flags); |
66 | } | 66 | } |
67 | 67 | ||
68 | static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) | 68 | static void intc_redirect_irq(struct irq_desc *desc) |
69 | { | 69 | { |
70 | generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc)); | 70 | generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc)); |
71 | } | 71 | } |
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h index 7dff08e2a071..6ce7f0d26dcf 100644 --- a/drivers/sh/intc/internals.h +++ b/drivers/sh/intc/internals.h | |||
@@ -99,15 +99,7 @@ static inline struct intc_desc_int *get_intc_desc(unsigned int irq) | |||
99 | */ | 99 | */ |
100 | static inline void activate_irq(int irq) | 100 | static inline void activate_irq(int irq) |
101 | { | 101 | { |
102 | #ifdef CONFIG_ARM | 102 | irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE); |
103 | /* ARM requires an extra step to clear IRQ_NOREQUEST, which it | ||
104 | * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE. | ||
105 | */ | ||
106 | set_irq_flags(irq, IRQF_VALID); | ||
107 | #else | ||
108 | /* same effect on other architectures */ | ||
109 | irq_set_noprobe(irq); | ||
110 | #endif | ||
111 | } | 103 | } |
112 | 104 | ||
113 | static inline int intc_handle_int_cmp(const void *a, const void *b) | 105 | static inline int intc_handle_int_cmp(const void *a, const void *b) |
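
activate_irq() now calls irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE) on every architecture: the first argument names flags to clear and the second flags to set, which covers both the old ARM set_irq_flags() path and the generic irq_set_noprobe() path in one call. A toy clear-then-set flag helper in the same shape (the flag values here are made up):

    #include <stdio.h>

    #define FLAG_NOREQUEST  (1u << 0)   /* stand-in for IRQ_NOREQUEST */
    #define FLAG_NOPROBE    (1u << 1)   /* stand-in for IRQ_NOPROBE */

    static unsigned int modify_status(unsigned int status,
                                      unsigned int clr, unsigned int set)
    {
        return (status & ~clr) | set;
    }

    int main(void)
    {
        unsigned int st = FLAG_NOREQUEST;   /* as left by generic irq setup */

        st = modify_status(st, FLAG_NOREQUEST, FLAG_NOPROBE);
        printf("requestable=%d noprobe=%d\n",
               !(st & FLAG_NOREQUEST), !!(st & FLAG_NOPROBE));
        return 0;
    }
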
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index bafc51c6f0ba..e7899624aa0b 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c | |||
@@ -109,7 +109,7 @@ static int add_virq_to_pirq(unsigned int irq, unsigned int virq) | |||
109 | return 0; | 109 | return 0; |
110 | } | 110 | } |
111 | 111 | ||
112 | static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc) | 112 | static void intc_virq_handler(struct irq_desc *desc) |
113 | { | 113 | { |
114 | unsigned int irq = irq_desc_get_irq(desc); | 114 | unsigned int irq = irq_desc_get_irq(desc); |
115 | struct irq_data *data = irq_desc_get_irq_data(desc); | 115 | struct irq_data *data = irq_desc_get_irq_data(desc); |
@@ -127,7 +127,7 @@ static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc) | |||
127 | handle = (unsigned long)irq_desc_get_handler_data(vdesc); | 127 | handle = (unsigned long)irq_desc_get_handler_data(vdesc); |
128 | addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); | 128 | addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); |
129 | if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) | 129 | if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) |
130 | generic_handle_irq_desc(entry->irq, vdesc); | 130 | generic_handle_irq_desc(vdesc); |
131 | } | 131 | } |
132 | } | 132 | } |
133 | 133 | ||
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index d3d1891cda3c..25abd4eb7d10 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c | |||
@@ -35,20 +35,11 @@ static struct pm_clk_notifier_block platform_bus_notifier = { | |||
35 | static int __init sh_pm_runtime_init(void) | 35 | static int __init sh_pm_runtime_init(void) |
36 | { | 36 | { |
37 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { | 37 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { |
38 | if (!of_machine_is_compatible("renesas,emev2") && | 38 | if (!of_find_compatible_node(NULL, NULL, |
39 | !of_machine_is_compatible("renesas,r7s72100") && | 39 | "renesas,cpg-mstp-clocks")) |
40 | #ifndef CONFIG_PM_GENERIC_DOMAINS_OF | 40 | return 0; |
41 | !of_machine_is_compatible("renesas,r8a73a4") && | 41 | if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS_OF) && |
42 | !of_machine_is_compatible("renesas,r8a7740") && | 42 | of_find_node_with_property(NULL, "#power-domain-cells")) |
43 | !of_machine_is_compatible("renesas,sh73a0") && | ||
44 | #endif | ||
45 | !of_machine_is_compatible("renesas,r8a7778") && | ||
46 | !of_machine_is_compatible("renesas,r8a7779") && | ||
47 | !of_machine_is_compatible("renesas,r8a7790") && | ||
48 | !of_machine_is_compatible("renesas,r8a7791") && | ||
49 | !of_machine_is_compatible("renesas,r8a7792") && | ||
50 | !of_machine_is_compatible("renesas,r8a7793") && | ||
51 | !of_machine_is_compatible("renesas,r8a7794")) | ||
52 | return 0; | 43 | return 0; |
53 | } | 44 | } |
54 | 45 | ||
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c index 6792aae9e2e5..052aecf29893 100644 --- a/drivers/soc/dove/pmu.c +++ b/drivers/soc/dove/pmu.c | |||
@@ -222,9 +222,9 @@ static void __pmu_domain_register(struct pmu_domain *domain, | |||
222 | } | 222 | } |
223 | 223 | ||
224 | /* PMU IRQ controller */ | 224 | /* PMU IRQ controller */ |
225 | static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) | 225 | static void pmu_irq_handler(struct irq_desc *desc) |
226 | { | 226 | { |
227 | struct pmu_data *pmu = irq_get_handler_data(irq); | 227 | struct pmu_data *pmu = irq_desc_get_handler_data(desc); |
228 | struct irq_chip_generic *gc = pmu->irq_gc; | 228 | struct irq_chip_generic *gc = pmu->irq_gc; |
229 | struct irq_domain *domain = pmu->irq_domain; | 229 | struct irq_domain *domain = pmu->irq_domain; |
230 | void __iomem *base = gc->reg_base; | 230 | void __iomem *base = gc->reg_base; |
@@ -232,7 +232,7 @@ static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
232 | u32 done = ~0; | 232 | u32 done = ~0; |
233 | 233 | ||
234 | if (stat == 0) { | 234 | if (stat == 0) { |
235 | handle_bad_irq(irq, desc); | 235 | handle_bad_irq(desc); |
236 | return; | 236 | return; |
237 | } | 237 | } |
238 | 238 | ||
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index bf9ed380bb1c..63318e2afba1 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -1720,6 +1720,7 @@ static int atmel_spi_runtime_resume(struct device *dev) | |||
1720 | return clk_prepare_enable(as->clk); | 1720 | return clk_prepare_enable(as->clk); |
1721 | } | 1721 | } |
1722 | 1722 | ||
1723 | #ifdef CONFIG_PM_SLEEP | ||
1723 | static int atmel_spi_suspend(struct device *dev) | 1724 | static int atmel_spi_suspend(struct device *dev) |
1724 | { | 1725 | { |
1725 | struct spi_master *master = dev_get_drvdata(dev); | 1726 | struct spi_master *master = dev_get_drvdata(dev); |
@@ -1756,6 +1757,7 @@ static int atmel_spi_resume(struct device *dev) | |||
1756 | 1757 | ||
1757 | return ret; | 1758 | return ret; |
1758 | } | 1759 | } |
1760 | #endif | ||
1759 | 1761 | ||
1760 | static const struct dev_pm_ops atmel_spi_pm_ops = { | 1762 | static const struct dev_pm_ops atmel_spi_pm_ops = { |
1761 | SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume) | 1763 | SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume) |
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index e7874a6171ec..3e8eeb23d4e9 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c | |||
@@ -386,14 +386,14 @@ static bool bcm2835_spi_can_dma(struct spi_master *master, | |||
386 | /* otherwise we only allow transfers within the same page | 386 | /* otherwise we only allow transfers within the same page |
387 | * to avoid wasting time on dma_mapping when it is not practical | 387 | * to avoid wasting time on dma_mapping when it is not practical |
388 | */ | 388 | */ |
389 | if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) { | 389 | if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) { |
390 | dev_warn_once(&spi->dev, | 390 | dev_warn_once(&spi->dev, |
391 | "Unaligned spi tx-transfer bridging page\n"); | 391 | "Unaligned spi tx-transfer bridging page\n"); |
392 | return false; | 392 | return false; |
393 | } | 393 | } |
394 | if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) { | 394 | if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) { |
395 | dev_warn_once(&spi->dev, | 395 | dev_warn_once(&spi->dev, |
396 | "Unaligned spi tx-transfer bridging page\n"); | 396 | "Unaligned spi rx-transfer bridging page\n"); |
397 | return false; | 397 | return false; |
398 | } | 398 | } |
399 | 399 | ||
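
The bcm2835 fix swaps PAGE_MASK for (PAGE_SIZE - 1): the test needs the buffer's offset within its page so that offset + len > PAGE_SIZE flags a transfer crossing a page boundary, whereas masking with PAGE_MASK keeps the page-aligned address and makes the comparison meaningless. A small demonstration with a 4 KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static int crosses_wrong(unsigned long buf, unsigned long len)
    {
        return (buf & PAGE_MASK) + len > PAGE_SIZE;       /* old check */
    }

    static int crosses_right(unsigned long buf, unsigned long len)
    {
        return (buf & (PAGE_SIZE - 1)) + len > PAGE_SIZE; /* fixed check */
    }

    int main(void)
    {
        /* 512-byte transfer starting 16 bytes into a page: stays in page */
        printf("wrong=%d right=%d\n",
               crosses_wrong(0x12345010UL, 512), crosses_right(0x12345010UL, 512));
        /* same length starting 256 bytes before the page end: crosses */
        printf("wrong=%d right=%d\n",
               crosses_wrong(0x12345f00UL, 512), crosses_right(0x12345f00UL, 512));
        return 0;
    }

In practice the old expression is true for nearly any buffer address, so it rejected DMA for transfers that were perfectly contained in one page.
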
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 3cf9faa6cc3f..a85d863d4a44 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c | |||
@@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev) | |||
992 | goto free_master; | 992 | goto free_master; |
993 | } | 993 | } |
994 | 994 | ||
995 | dspi->irq = platform_get_irq(pdev, 0); | 995 | ret = platform_get_irq(pdev, 0); |
996 | if (dspi->irq <= 0) { | 996 | if (ret == 0) |
997 | ret = -EINVAL; | 997 | ret = -EINVAL; |
998 | if (ret < 0) | ||
998 | goto free_master; | 999 | goto free_master; |
999 | } | 1000 | dspi->irq = ret; |
1000 | 1001 | ||
1001 | ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq, | 1002 | ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq, |
1002 | dummy_thread_fn, 0, dev_name(&pdev->dev), dspi); | 1003 | dummy_thread_fn, 0, dev_name(&pdev->dev), dspi); |
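
The davinci probe change keeps whatever negative errno platform_get_irq() returned, only substituting -EINVAL when the lookup returns 0, which is not a usable IRQ number, instead of collapsing every failure to -EINVAL. A userspace sketch of that pattern; get_irq() and the -ENXIO error are stand-ins:

    #include <errno.h>
    #include <stdio.h>

    static int get_irq(int available)
    {
        return available ? 42 : -ENXIO;   /* stand-in for platform_get_irq() */
    }

    static int probe(int available, int *irq_out)
    {
        int ret = get_irq(available);

        if (ret == 0)         /* 0 is not a valid IRQ number */
            ret = -EINVAL;
        if (ret < 0)
            return ret;       /* keep the original error code */
        *irq_out = ret;
        return 0;
    }

    int main(void)
    {
        int irq = -1;

        printf("%d\n", probe(0, &irq));                   /* -ENXIO, propagated */
        printf("%d\n", probe(1, &irq) == 0 ? irq : -1);   /* 42 */
        return 0;
    }
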
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c index 5468fc70dbf8..2465259f6241 100644 --- a/drivers/spi/spi-meson-spifc.c +++ b/drivers/spi/spi-meson-spifc.c | |||
@@ -444,6 +444,7 @@ static const struct of_device_id meson_spifc_dt_match[] = { | |||
444 | { .compatible = "amlogic,meson6-spifc", }, | 444 | { .compatible = "amlogic,meson6-spifc", }, |
445 | { }, | 445 | { }, |
446 | }; | 446 | }; |
447 | MODULE_DEVICE_TABLE(of, meson_spifc_dt_match); | ||
447 | 448 | ||
448 | static struct platform_driver meson_spifc_driver = { | 449 | static struct platform_driver meson_spifc_driver = { |
449 | .probe = meson_spifc_probe, | 450 | .probe = meson_spifc_probe, |
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 5f6315c47920..ecb6c58238c4 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c | |||
@@ -85,7 +85,7 @@ struct mtk_spi { | |||
85 | void __iomem *base; | 85 | void __iomem *base; |
86 | u32 state; | 86 | u32 state; |
87 | u32 pad_sel; | 87 | u32 pad_sel; |
88 | struct clk *spi_clk, *parent_clk; | 88 | struct clk *parent_clk, *sel_clk, *spi_clk; |
89 | struct spi_transfer *cur_transfer; | 89 | struct spi_transfer *cur_transfer; |
90 | u32 xfer_len; | 90 | u32 xfer_len; |
91 | struct scatterlist *tx_sgl, *rx_sgl; | 91 | struct scatterlist *tx_sgl, *rx_sgl; |
@@ -173,22 +173,6 @@ static void mtk_spi_config(struct mtk_spi *mdata, | |||
173 | writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG); | 173 | writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG); |
174 | } | 174 | } |
175 | 175 | ||
176 | static int mtk_spi_prepare_hardware(struct spi_master *master) | ||
177 | { | ||
178 | struct spi_transfer *trans; | ||
179 | struct mtk_spi *mdata = spi_master_get_devdata(master); | ||
180 | struct spi_message *msg = master->cur_msg; | ||
181 | |||
182 | trans = list_first_entry(&msg->transfers, struct spi_transfer, | ||
183 | transfer_list); | ||
184 | if (!trans->cs_change) { | ||
185 | mdata->state = MTK_SPI_IDLE; | ||
186 | mtk_spi_reset(mdata); | ||
187 | } | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static int mtk_spi_prepare_message(struct spi_master *master, | 176 | static int mtk_spi_prepare_message(struct spi_master *master, |
193 | struct spi_message *msg) | 177 | struct spi_message *msg) |
194 | { | 178 | { |
@@ -228,11 +212,15 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable) | |||
228 | struct mtk_spi *mdata = spi_master_get_devdata(spi->master); | 212 | struct mtk_spi *mdata = spi_master_get_devdata(spi->master); |
229 | 213 | ||
230 | reg_val = readl(mdata->base + SPI_CMD_REG); | 214 | reg_val = readl(mdata->base + SPI_CMD_REG); |
231 | if (!enable) | 215 | if (!enable) { |
232 | reg_val |= SPI_CMD_PAUSE_EN; | 216 | reg_val |= SPI_CMD_PAUSE_EN; |
233 | else | 217 | writel(reg_val, mdata->base + SPI_CMD_REG); |
218 | } else { | ||
234 | reg_val &= ~SPI_CMD_PAUSE_EN; | 219 | reg_val &= ~SPI_CMD_PAUSE_EN; |
235 | writel(reg_val, mdata->base + SPI_CMD_REG); | 220 | writel(reg_val, mdata->base + SPI_CMD_REG); |
221 | mdata->state = MTK_SPI_IDLE; | ||
222 | mtk_spi_reset(mdata); | ||
223 | } | ||
236 | } | 224 | } |
237 | 225 | ||
238 | static void mtk_spi_prepare_transfer(struct spi_master *master, | 226 | static void mtk_spi_prepare_transfer(struct spi_master *master, |
@@ -509,7 +497,6 @@ static int mtk_spi_probe(struct platform_device *pdev) | |||
509 | master->mode_bits = SPI_CPOL | SPI_CPHA; | 497 | master->mode_bits = SPI_CPOL | SPI_CPHA; |
510 | 498 | ||
511 | master->set_cs = mtk_spi_set_cs; | 499 | master->set_cs = mtk_spi_set_cs; |
512 | master->prepare_transfer_hardware = mtk_spi_prepare_hardware; | ||
513 | master->prepare_message = mtk_spi_prepare_message; | 500 | master->prepare_message = mtk_spi_prepare_message; |
514 | master->transfer_one = mtk_spi_transfer_one; | 501 | master->transfer_one = mtk_spi_transfer_one; |
515 | master->can_dma = mtk_spi_can_dma; | 502 | master->can_dma = mtk_spi_can_dma; |
@@ -576,13 +563,6 @@ static int mtk_spi_probe(struct platform_device *pdev) | |||
576 | goto err_put_master; | 563 | goto err_put_master; |
577 | } | 564 | } |
578 | 565 | ||
579 | mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk"); | ||
580 | if (IS_ERR(mdata->spi_clk)) { | ||
581 | ret = PTR_ERR(mdata->spi_clk); | ||
582 | dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret); | ||
583 | goto err_put_master; | ||
584 | } | ||
585 | |||
586 | mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk"); | 566 | mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk"); |
587 | if (IS_ERR(mdata->parent_clk)) { | 567 | if (IS_ERR(mdata->parent_clk)) { |
588 | ret = PTR_ERR(mdata->parent_clk); | 568 | ret = PTR_ERR(mdata->parent_clk); |
@@ -590,13 +570,27 @@ static int mtk_spi_probe(struct platform_device *pdev) | |||
590 | goto err_put_master; | 570 | goto err_put_master; |
591 | } | 571 | } |
592 | 572 | ||
573 | mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk"); | ||
574 | if (IS_ERR(mdata->sel_clk)) { | ||
575 | ret = PTR_ERR(mdata->sel_clk); | ||
576 | dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret); | ||
577 | goto err_put_master; | ||
578 | } | ||
579 | |||
580 | mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk"); | ||
581 | if (IS_ERR(mdata->spi_clk)) { | ||
582 | ret = PTR_ERR(mdata->spi_clk); | ||
583 | dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret); | ||
584 | goto err_put_master; | ||
585 | } | ||
586 | |||
593 | ret = clk_prepare_enable(mdata->spi_clk); | 587 | ret = clk_prepare_enable(mdata->spi_clk); |
594 | if (ret < 0) { | 588 | if (ret < 0) { |
595 | dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret); | 589 | dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret); |
596 | goto err_put_master; | 590 | goto err_put_master; |
597 | } | 591 | } |
598 | 592 | ||
599 | ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk); | 593 | ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk); |
600 | if (ret < 0) { | 594 | if (ret < 0) { |
601 | dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret); | 595 | dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret); |
602 | goto err_disable_clk; | 596 | goto err_disable_clk; |
@@ -630,7 +624,6 @@ static int mtk_spi_remove(struct platform_device *pdev) | |||
630 | pm_runtime_disable(&pdev->dev); | 624 | pm_runtime_disable(&pdev->dev); |
631 | 625 | ||
632 | mtk_spi_reset(mdata); | 626 | mtk_spi_reset(mdata); |
633 | clk_disable_unprepare(mdata->spi_clk); | ||
634 | spi_master_put(master); | 627 | spi_master_put(master); |
635 | 628 | ||
636 | return 0; | 629 | return 0; |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index fdd791977041..a8ef38ebb9c9 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -654,6 +654,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | |||
654 | if (!(sccr1_reg & SSCR1_TIE)) | 654 | if (!(sccr1_reg & SSCR1_TIE)) |
655 | mask &= ~SSSR_TFS; | 655 | mask &= ~SSSR_TFS; |
656 | 656 | ||
657 | /* Ignore RX timeout interrupt if it is disabled */ | ||
658 | if (!(sccr1_reg & SSCR1_TINTE)) | ||
659 | mask &= ~SSSR_TINT; | ||
660 | |||
657 | if (!(status & mask)) | 661 | if (!(status & mask)) |
658 | return IRQ_NONE; | 662 | return IRQ_NONE; |
659 | 663 | ||
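
The pxa2xx interrupt handler already dropped SSSR_TFS from its mask when the transmit interrupt (SSCR1_TIE) was disabled; the hunk above does the same for the RX-timeout status bit when SSCR1_TINTE is off, so a stale timeout bit cannot make the handler claim an interrupt it did not cause. A sketch of that masking decision; the bit positions below are illustrative, not the real register layout:

    #include <stdio.h>

    #define SSSR_TFS    (1u << 0)   /* TX FIFO status        (illustrative bit) */
    #define SSSR_TINT   (1u << 1)   /* RX timeout status     (illustrative bit) */
    #define SSCR1_TIE   (1u << 0)   /* TX interrupt enable   (illustrative bit) */
    #define SSCR1_TINTE (1u << 1)   /* RX timeout irq enable (illustrative bit) */

    static int irq_is_ours(unsigned int status, unsigned int sccr1)
    {
        unsigned int mask = SSSR_TFS | SSSR_TINT;

        if (!(sccr1 & SSCR1_TIE))
            mask &= ~SSSR_TFS;
        if (!(sccr1 & SSCR1_TINTE))
            mask &= ~SSSR_TINT;       /* the new check from the hunk above */
        return (status & mask) != 0;
    }

    int main(void)
    {
        /* timeout status latched but its interrupt disabled: IRQ_NONE */
        printf("%d\n", irq_is_ours(SSSR_TINT, 0));
        /* timeout interrupt enabled: handle it */
        printf("%d\n", irq_is_ours(SSSR_TINT, SSCR1_TINTE));
        return 0;
    }
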
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c index 2e32ea2f194f..be6155cba9de 100644 --- a/drivers/spi/spi-xtensa-xtfpga.c +++ b/drivers/spi/spi-xtensa-xtfpga.c | |||
@@ -34,13 +34,13 @@ struct xtfpga_spi { | |||
34 | static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi, | 34 | static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi, |
35 | unsigned addr, u32 val) | 35 | unsigned addr, u32 val) |
36 | { | 36 | { |
37 | iowrite32(val, spi->regs + addr); | 37 | __raw_writel(val, spi->regs + addr); |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi, | 40 | static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi, |
41 | unsigned addr) | 41 | unsigned addr) |
42 | { | 42 | { |
43 | return ioread32(spi->regs + addr); | 43 | return __raw_readl(spi->regs + addr); |
44 | } | 44 | } |
45 | 45 | ||
46 | static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi) | 46 | static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi) |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 3abb3903f2ad..a5f53de813d3 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -1610,8 +1610,7 @@ static struct class spi_master_class = { | |||
1610 | * | 1610 | * |
1611 | * The caller is responsible for assigning the bus number and initializing | 1611 | * The caller is responsible for assigning the bus number and initializing |
1612 | * the master's methods before calling spi_register_master(); and (after errors | 1612 | * the master's methods before calling spi_register_master(); and (after errors |
1613 | * adding the device) calling spi_master_put() and kfree() to prevent a memory | 1613 | * adding the device) calling spi_master_put() to prevent a memory leak. |
1614 | * leak. | ||
1615 | */ | 1614 | */ |
1616 | struct spi_master *spi_alloc_master(struct device *dev, unsigned size) | 1615 | struct spi_master *spi_alloc_master(struct device *dev, unsigned size) |
1617 | { | 1616 | { |
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index fba92a526531..ef008e52f953 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -651,7 +651,8 @@ static int spidev_release(struct inode *inode, struct file *filp) | |||
651 | kfree(spidev->rx_buffer); | 651 | kfree(spidev->rx_buffer); |
652 | spidev->rx_buffer = NULL; | 652 | spidev->rx_buffer = NULL; |
653 | 653 | ||
654 | spidev->speed_hz = spidev->spi->max_speed_hz; | 654 | if (spidev->spi) |
655 | spidev->speed_hz = spidev->spi->max_speed_hz; | ||
655 | 656 | ||
656 | /* ... after we unbound from the underlying device? */ | 657 | /* ... after we unbound from the underlying device? */ |
657 | spin_lock_irq(&spidev->spi_lock); | 658 | spin_lock_irq(&spidev->spi_lock); |
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index bdfb3c84c3cb..4a3cf9ba152f 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c | |||
@@ -451,7 +451,7 @@ static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid) | |||
451 | } | 451 | } |
452 | } | 452 | } |
453 | 453 | ||
454 | static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc) | 454 | static void pmic_arb_chained_irq(struct irq_desc *desc) |
455 | { | 455 | { |
456 | struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc); | 456 | struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc); |
457 | struct irq_chip *chip = irq_desc_get_chip(desc); | 457 | struct irq_chip *chip = irq_desc_get_chip(desc); |
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO index 20288fc53946..8f3ac37bfe12 100644 --- a/drivers/staging/android/TODO +++ b/drivers/staging/android/TODO | |||
@@ -5,5 +5,25 @@ TODO: | |||
5 | - add proper arch dependencies as needed | 5 | - add proper arch dependencies as needed |
6 | - audit userspace interfaces to make sure they are sane | 6 | - audit userspace interfaces to make sure they are sane |
7 | 7 | ||
8 | |||
9 | ion/ | ||
10 | - Remove ION_IOC_SYNC: Flushing for devices should be purely a kernel internal | ||
11 | interface on top of dma-buf. flush_for_device needs to be added to dma-buf | ||
12 | first. | ||
13 | - Remove ION_IOC_CUSTOM: Atm used for cache flushing for cpu access in some | ||
14 | vendor trees. Should be replaced with an ioctl on the dma-buf to expose the | ||
15 | begin/end_cpu_access hooks to userspace. | ||
16 | - Clarify the tricks ion plays with explicitly managing coherency behind the | ||
17 | dma api's back (this is absolutely needed for high-perf gpu drivers): Add an | ||
18 | explicit coherency management mode to flush_for_device to be used by drivers | ||
19 | which want to manage caches themselves and which indicates whether cpu caches | ||
20 | need flushing. | ||
21 | - With those removed there's probably no use for ION_IOC_IMPORT anymore either | ||
22 | since ion would just be the central allocator for shared buffers. | ||
23 | - Add dt-binding to expose cma regions as ion heaps, with the rule that any | ||
24 | such cma regions must already be used by some device for dma. I.e. ion only | ||
25 | exposes existing cma regions and doesn't unnecessarily reserve memory when | ||
26 | booting a system which doesn't use ion. | ||
27 | |||
8 | Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: | 28 | Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: |
9 | Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com> | 29 | Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com> |
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 217aa537c4eb..6e8d8392ca38 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c | |||
@@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) | |||
1179 | mutex_unlock(&client->lock); | 1179 | mutex_unlock(&client->lock); |
1180 | goto end; | 1180 | goto end; |
1181 | } | 1181 | } |
1182 | mutex_unlock(&client->lock); | ||
1183 | 1182 | ||
1184 | handle = ion_handle_create(client, buffer); | 1183 | handle = ion_handle_create(client, buffer); |
1185 | if (IS_ERR(handle)) | 1184 | if (IS_ERR(handle)) { |
1185 | mutex_unlock(&client->lock); | ||
1186 | goto end; | 1186 | goto end; |
1187 | } | ||
1187 | 1188 | ||
1188 | mutex_lock(&client->lock); | ||
1189 | ret = ion_handle_add(client, handle); | 1189 | ret = ion_handle_add(client, handle); |
1190 | mutex_unlock(&client->lock); | 1190 | mutex_unlock(&client->lock); |
1191 | if (ret) { | 1191 | if (ret) { |
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c index 32f3a9d921d6..5cafa50d1fac 100644 --- a/drivers/staging/fbtft/fb_uc1611.c +++ b/drivers/staging/fbtft/fb_uc1611.c | |||
@@ -76,7 +76,7 @@ static int init_display(struct fbtft_par *par) | |||
76 | 76 | ||
77 | /* Set CS active high */ | 77 | /* Set CS active high */ |
78 | par->spi->mode |= SPI_CS_HIGH; | 78 | par->spi->mode |= SPI_CS_HIGH; |
79 | ret = par->spi->master->setup(par->spi); | 79 | ret = spi_setup(par->spi); |
80 | if (ret) { | 80 | if (ret) { |
81 | dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); | 81 | dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); |
82 | return ret; | 82 | return ret; |
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c index 88fb2c0132d5..8eae6ef25846 100644 --- a/drivers/staging/fbtft/fb_watterott.c +++ b/drivers/staging/fbtft/fb_watterott.c | |||
@@ -169,7 +169,7 @@ static int init_display(struct fbtft_par *par) | |||
169 | /* enable SPI interface by having CS and MOSI low during reset */ | 169 | /* enable SPI interface by having CS and MOSI low during reset */ |
170 | save_mode = par->spi->mode; | 170 | save_mode = par->spi->mode; |
171 | par->spi->mode |= SPI_CS_HIGH; | 171 | par->spi->mode |= SPI_CS_HIGH; |
172 | ret = par->spi->master->setup(par->spi); /* set CS inactive low */ | 172 | ret = spi_setup(par->spi); /* set CS inactive low */ |
173 | if (ret) { | 173 | if (ret) { |
174 | dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); | 174 | dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); |
175 | return ret; | 175 | return ret; |
@@ -180,7 +180,7 @@ static int init_display(struct fbtft_par *par) | |||
180 | par->fbtftops.reset(par); | 180 | par->fbtftops.reset(par); |
181 | mdelay(1000); | 181 | mdelay(1000); |
182 | par->spi->mode = save_mode; | 182 | par->spi->mode = save_mode; |
183 | ret = par->spi->master->setup(par->spi); | 183 | ret = spi_setup(par->spi); |
184 | if (ret) { | 184 | if (ret) { |
185 | dev_err(par->info->device, "Could not restore SPI mode\n"); | 185 | dev_err(par->info->device, "Could not restore SPI mode\n"); |
186 | return ret; | 186 | return ret; |
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index 23392eb6799e..7f5fa3d1cab0 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c | |||
@@ -1436,15 +1436,11 @@ int fbtft_probe_common(struct fbtft_display *display, | |||
1436 | 1436 | ||
1437 | /* 9-bit SPI setup */ | 1437 | /* 9-bit SPI setup */ |
1438 | if (par->spi && display->buswidth == 9) { | 1438 | if (par->spi && display->buswidth == 9) { |
1439 | par->spi->bits_per_word = 9; | 1439 | if (par->spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) { |
1440 | ret = par->spi->master->setup(par->spi); | 1440 | par->spi->bits_per_word = 9; |
1441 | if (ret) { | 1441 | } else { |
1442 | dev_warn(&par->spi->dev, | 1442 | dev_warn(&par->spi->dev, |
1443 | "9-bit SPI not available, emulating using 8-bit.\n"); | 1443 | "9-bit SPI not available, emulating using 8-bit.\n"); |
1444 | par->spi->bits_per_word = 8; | ||
1445 | ret = par->spi->master->setup(par->spi); | ||
1446 | if (ret) | ||
1447 | goto out_release; | ||
1448 | /* allocate buffer with room for dc bits */ | 1444 | /* allocate buffer with room for dc bits */ |
1449 | par->extra = devm_kzalloc(par->info->device, | 1445 | par->extra = devm_kzalloc(par->info->device, |
1450 | par->txbuf.len + (par->txbuf.len / 8) + 8, | 1446 | par->txbuf.len + (par->txbuf.len / 8) + 8, |
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c index c763efc5de7d..3f380a0086c3 100644 --- a/drivers/staging/fbtft/flexfb.c +++ b/drivers/staging/fbtft/flexfb.c | |||
@@ -463,15 +463,12 @@ static int flexfb_probe_common(struct spi_device *sdev, | |||
463 | } | 463 | } |
464 | par->fbtftops.write_register = fbtft_write_reg8_bus9; | 464 | par->fbtftops.write_register = fbtft_write_reg8_bus9; |
465 | par->fbtftops.write_vmem = fbtft_write_vmem16_bus9; | 465 | par->fbtftops.write_vmem = fbtft_write_vmem16_bus9; |
466 | sdev->bits_per_word = 9; | 466 | if (par->spi->master->bits_per_word_mask |
467 | ret = sdev->master->setup(sdev); | 467 | & SPI_BPW_MASK(9)) { |
468 | if (ret) { | 468 | par->spi->bits_per_word = 9; |
469 | } else { | ||
469 | dev_warn(dev, | 470 | dev_warn(dev, |
470 | "9-bit SPI not available, emulating using 8-bit.\n"); | 471 | "9-bit SPI not available, emulating using 8-bit.\n"); |
471 | sdev->bits_per_word = 8; | ||
472 | ret = sdev->master->setup(sdev); | ||
473 | if (ret) | ||
474 | goto out_release; | ||
475 | /* allocate buffer with room for dc bits */ | 472 | /* allocate buffer with room for dc bits */ |
476 | par->extra = devm_kzalloc(par->info->device, | 473 | par->extra = devm_kzalloc(par->info->device, |
477 | par->txbuf.len + (par->txbuf.len / 8) + 8, | 474 | par->txbuf.len + (par->txbuf.len / 8) + 8, |
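
Both fbtft hunks above stop using the controller's setup() callback as a capability probe and instead look at master->bits_per_word_mask, where the SPI core has each controller advertise supported word sizes as a bitmask with bit (n - 1) meaning n bits per word. A tiny check in that style; the mask value chosen here is invented:

    #include <stdio.h>

    #define SPI_BPW_MASK(bits) (1u << ((bits) - 1))

    int main(void)
    {
        /* a controller that only supports 8- and 16-bit words */
        unsigned int bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);

        if (bits_per_word_mask & SPI_BPW_MASK(9))
            printf("using native 9-bit transfers\n");
        else
            printf("9-bit SPI not available, emulating using 8-bit\n");
        return 0;
    }
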
diff --git a/drivers/staging/lustre/README.txt b/drivers/staging/lustre/README.txt index cf0ca50ff83b..0676243eea9e 100644 --- a/drivers/staging/lustre/README.txt +++ b/drivers/staging/lustre/README.txt | |||
@@ -14,10 +14,8 @@ Unlike shared disk storage cluster filesystems (e.g. OCFS2, GFS, GPFS), | |||
14 | Lustre has independent Metadata and Data servers that clients can access | 14 | Lustre has independent Metadata and Data servers that clients can access |
15 | in parallel to maximize performance. | 15 | in parallel to maximize performance. |
16 | 16 | ||
17 | In order to use Lustre client you will need to download lustre client | 17 | In order to use Lustre client you will need to download the "lustre-client" |
18 | tools from | 18 | package that contains the userspace tools from http://lustre.org/download/ |
19 | https://downloads.hpdd.intel.com/public/lustre/latest-feature-release/ | ||
20 | the package name is lustre-client. | ||
21 | 19 | ||
22 | You will need to install and configure your Lustre servers separately. | 20 | You will need to install and configure your Lustre servers separately. |
23 | 21 | ||
@@ -76,12 +74,10 @@ Mount Options | |||
76 | 74 | ||
77 | More Information | 75 | More Information |
78 | ================ | 76 | ================ |
79 | You can get more information at | 77 | You can get more information at the Lustre website: http://wiki.lustre.org/ |
80 | OpenSFS website: http://lustre.opensfs.org/about/ | ||
81 | Intel HPDD wiki: https://wiki.hpdd.intel.com | ||
82 | 78 | ||
83 | Out of tree Lustre client and server code is available at: | 79 | Source for the userspace tools and out-of-tree client and server code |
84 | http://git.whamcloud.com/fs/lustre-release.git | 80 | is available at: http://git.hpdd.intel.com/fs/lustre-release.git |
85 | 81 | ||
86 | Latest binary packages: | 82 | Latest binary packages: |
87 | http://lustre.opensfs.org/download-lustre/ | 83 | http://lustre.org/download/ |
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 769b61193d87..a9bc6e23fc25 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c | |||
@@ -224,7 +224,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) | |||
224 | 224 | ||
225 | prefetchw(&page->flags); | 225 | prefetchw(&page->flags); |
226 | ret = add_to_page_cache_lru(page, inode->i_mapping, offset, | 226 | ret = add_to_page_cache_lru(page, inode->i_mapping, offset, |
227 | GFP_KERNEL); | 227 | GFP_NOFS); |
228 | if (ret == 0) { | 228 | if (ret == 0) { |
229 | unlock_page(page); | 229 | unlock_page(page); |
230 | } else { | 230 | } else { |
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig index d50de03de7b9..0b9b9b539f70 100644 --- a/drivers/staging/most/Kconfig +++ b/drivers/staging/most/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | menuconfig MOST | 1 | menuconfig MOST |
2 | tristate "MOST driver" | 2 | tristate "MOST driver" |
3 | depends on HAS_DMA | ||
3 | select MOSTCORE | 4 | select MOSTCORE |
4 | default n | 5 | default n |
5 | ---help--- | 6 | ---help--- |
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig index 1d4ad1d67758..fc548769479b 100644 --- a/drivers/staging/most/hdm-dim2/Kconfig +++ b/drivers/staging/most/hdm-dim2/Kconfig | |||
@@ -5,6 +5,7 @@ | |||
5 | config HDM_DIM2 | 5 | config HDM_DIM2 |
6 | tristate "DIM2 HDM" | 6 | tristate "DIM2 HDM" |
7 | depends on AIM_NETWORK | 7 | depends on AIM_NETWORK |
8 | depends on HAS_IOMEM | ||
8 | 9 | ||
9 | ---help--- | 10 | ---help--- |
10 | Say Y here if you want to connect via MediaLB to network transceiver. | 11 | Say Y here if you want to connect via MediaLB to network transceiver. |
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig index a482c3fdf34b..ec1546312ee6 100644 --- a/drivers/staging/most/hdm-usb/Kconfig +++ b/drivers/staging/most/hdm-usb/Kconfig | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | config HDM_USB | 5 | config HDM_USB |
6 | tristate "USB HDM" | 6 | tristate "USB HDM" |
7 | depends on USB | 7 | depends on USB && NET |
8 | select AIM_NETWORK | 8 | select AIM_NETWORK |
9 | ---help--- | 9 | ---help--- |
10 | Say Y here if you want to connect via USB to network transceiver. | 10 | Say Y here if you want to connect via USB to network transceiver. |
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig index 38abf1b21b66..47172546d728 100644 --- a/drivers/staging/most/mostcore/Kconfig +++ b/drivers/staging/most/mostcore/Kconfig | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | config MOSTCORE | 5 | config MOSTCORE |
6 | tristate "MOST Core" | 6 | tristate "MOST Core" |
7 | depends on HAS_DMA | ||
7 | 8 | ||
8 | ---help--- | 9 | ---help--- |
9 | Say Y here if you want to enable MOST support. | 10 | Say Y here if you want to enable MOST support. |
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig index cf5fe9bb87a1..d7f62359d743 100644 --- a/drivers/staging/rdma/Kconfig +++ b/drivers/staging/rdma/Kconfig | |||
@@ -24,6 +24,8 @@ if STAGING_RDMA | |||
24 | 24 | ||
25 | source "drivers/staging/rdma/amso1100/Kconfig" | 25 | source "drivers/staging/rdma/amso1100/Kconfig" |
26 | 26 | ||
27 | source "drivers/staging/rdma/ehca/Kconfig" | ||
28 | |||
27 | source "drivers/staging/rdma/hfi1/Kconfig" | 29 | source "drivers/staging/rdma/hfi1/Kconfig" |
28 | 30 | ||
29 | source "drivers/staging/rdma/ipath/Kconfig" | 31 | source "drivers/staging/rdma/ipath/Kconfig" |
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile index cbd915ac7f20..139d78ef2c24 100644 --- a/drivers/staging/rdma/Makefile +++ b/drivers/staging/rdma/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | # Entries for RDMA_STAGING tree | 1 | # Entries for RDMA_STAGING tree |
2 | obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/ | 2 | obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/ |
3 | obj-$(CONFIG_INFINIBAND_EHCA) += ehca/ | ||
3 | obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ | 4 | obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ |
4 | obj-$(CONFIG_INFINIBAND_IPATH) += ipath/ | 5 | obj-$(CONFIG_INFINIBAND_IPATH) += ipath/ |
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig index 59f807d8d58e..3fadd2ad6426 100644 --- a/drivers/infiniband/hw/ehca/Kconfig +++ b/drivers/staging/rdma/ehca/Kconfig | |||
@@ -2,7 +2,8 @@ config INFINIBAND_EHCA | |||
2 | tristate "eHCA support" | 2 | tristate "eHCA support" |
3 | depends on IBMEBUS | 3 | depends on IBMEBUS |
4 | ---help--- | 4 | ---help--- |
5 | This driver supports the IBM pSeries eHCA InfiniBand adapter. | 5 | This driver supports the deprecated IBM pSeries eHCA InfiniBand |
6 | adapter. | ||
6 | 7 | ||
7 | To compile the driver as a module, choose M here. The module | 8 | To compile the driver as a module, choose M here. The module |
8 | will be called ib_ehca. | 9 | will be called ib_ehca. |
diff --git a/drivers/infiniband/hw/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile index 74d284e46a40..74d284e46a40 100644 --- a/drivers/infiniband/hw/ehca/Makefile +++ b/drivers/staging/rdma/ehca/Makefile | |||
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO new file mode 100644 index 000000000000..199a4a600142 --- /dev/null +++ b/drivers/staging/rdma/ehca/TODO | |||
@@ -0,0 +1,4 @@ | |||
1 | 9/2015 | ||
2 | |||
3 | The ehca driver has been deprecated and moved to drivers/staging/rdma. | ||
4 | It will be removed in the 4.6 merge window. | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c index 465926319f3d..465926319f3d 100644 --- a/drivers/infiniband/hw/ehca/ehca_av.c +++ b/drivers/staging/rdma/ehca/ehca_av.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h index bd45e0f3923f..bd45e0f3923f 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/staging/rdma/ehca/ehca_classes.h | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h index 689c35786dd2..689c35786dd2 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h +++ b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c index 9b68b175069b..9b68b175069b 100644 --- a/drivers/infiniband/hw/ehca/ehca_cq.c +++ b/drivers/staging/rdma/ehca/ehca_cq.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c index 90da6747d395..90da6747d395 100644 --- a/drivers/infiniband/hw/ehca/ehca_eq.c +++ b/drivers/staging/rdma/ehca/ehca_eq.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c index e8b1bb65797a..e8b1bb65797a 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/staging/rdma/ehca/ehca_hca.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c index 8615d7cf7e01..8615d7cf7e01 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/staging/rdma/ehca/ehca_irq.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h index 5370199f08c7..5370199f08c7 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.h +++ b/drivers/staging/rdma/ehca/ehca_irq.h | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h index 80e6a3d5df3e..80e6a3d5df3e 100644 --- a/drivers/infiniband/hw/ehca/ehca_iverbs.h +++ b/drivers/staging/rdma/ehca/ehca_iverbs.h | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c index 8246418cd4e0..8246418cd4e0 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/staging/rdma/ehca/ehca_main.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c index cec181532924..cec181532924 100644 --- a/drivers/infiniband/hw/ehca/ehca_mcast.c +++ b/drivers/staging/rdma/ehca/ehca_mcast.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c index f914b30999f8..f914b30999f8 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/staging/rdma/ehca/ehca_mrmw.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h index 50d8b51306dd..50d8b51306dd 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.h +++ b/drivers/staging/rdma/ehca/ehca_mrmw.h | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c index 351577a6670a..351577a6670a 100644 --- a/drivers/infiniband/hw/ehca/ehca_pd.c +++ b/drivers/staging/rdma/ehca/ehca_pd.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h index 90c4efa67586..90c4efa67586 100644 --- a/drivers/infiniband/hw/ehca/ehca_qes.h +++ b/drivers/staging/rdma/ehca/ehca_qes.h | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c index 2e89356c46fa..2e89356c46fa 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/staging/rdma/ehca/ehca_qp.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c index 47f94984353d..47f94984353d 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/staging/rdma/ehca/ehca_reqs.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c index 376b031c2c7f..376b031c2c7f 100644 --- a/drivers/infiniband/hw/ehca/ehca_sqp.c +++ b/drivers/staging/rdma/ehca/ehca_sqp.c | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h index d280b12aae64..d280b12aae64 100644 --- a/drivers/infiniband/hw/ehca/ehca_tools.h +++ b/drivers/staging/rdma/ehca/ehca_tools.h | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c index 1a1d5d99fcf9..1a1d5d99fcf9 100644 --- a/drivers/infiniband/hw/ehca/ehca_uverbs.c +++ b/drivers/staging/rdma/ehca/ehca_uverbs.c | |||
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c index 89517ffb4389..89517ffb4389 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.c +++ b/drivers/staging/rdma/ehca/hcp_if.c | |||
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h index a46e514c367b..a46e514c367b 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.h +++ b/drivers/staging/rdma/ehca/hcp_if.h | |||
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c index 077376ff3d28..077376ff3d28 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.c +++ b/drivers/staging/rdma/ehca/hcp_phyp.c | |||
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h index d1b029910249..d1b029910249 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.h +++ b/drivers/staging/rdma/ehca/hcp_phyp.h | |||
diff --git a/drivers/infiniband/hw/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h index 9dac93d02140..9dac93d02140 100644 --- a/drivers/infiniband/hw/ehca/hipz_fns.h +++ b/drivers/staging/rdma/ehca/hipz_fns.h | |||
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h index 868735fd3187..868735fd3187 100644 --- a/drivers/infiniband/hw/ehca/hipz_fns_core.h +++ b/drivers/staging/rdma/ehca/hipz_fns_core.h | |||
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h index bf996c7acc42..bf996c7acc42 100644 --- a/drivers/infiniband/hw/ehca/hipz_hw.h +++ b/drivers/staging/rdma/ehca/hipz_hw.h | |||
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c index 7ffc748cb973..7ffc748cb973 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c +++ b/drivers/staging/rdma/ehca/ipz_pt_fn.c | |||
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h index a801274ea337..a801274ea337 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h +++ b/drivers/staging/rdma/ehca/ipz_pt_fn.h | |||
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 654eafef1d30..aa58e597df06 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c | |||
@@ -2710,7 +2710,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) | |||
2710 | if (sleep_ok) { | 2710 | if (sleep_ok) { |
2711 | mutex_lock(&ppd->hls_lock); | 2711 | mutex_lock(&ppd->hls_lock); |
2712 | } else { | 2712 | } else { |
2713 | while (mutex_trylock(&ppd->hls_lock) == EBUSY) | 2713 | while (!mutex_trylock(&ppd->hls_lock)) |
2714 | udelay(1); | 2714 | udelay(1); |
2715 | } | 2715 | } |
2716 | 2716 | ||
@@ -2758,7 +2758,7 @@ int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok) | |||
2758 | if (sleep_ok) { | 2758 | if (sleep_ok) { |
2759 | mutex_lock(&dd->pport->hls_lock); | 2759 | mutex_lock(&dd->pport->hls_lock); |
2760 | } else { | 2760 | } else { |
2761 | while (mutex_trylock(&dd->pport->hls_lock) == EBUSY) | 2761 | while (!mutex_trylock(&dd->pport->hls_lock)) |
2762 | udelay(1); | 2762 | udelay(1); |
2763 | } | 2763 | } |
2764 | 2764 | ||
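
The hfi1 change works because the kernel's mutex_trylock() returns 1 when it takes the lock and 0 when it does not, so comparing the result with EBUSY was never true and the old loop fell through without holding hls_lock; spinning on !mutex_trylock() actually waits for the lock. The EBUSY convention the old code apparently assumed does exist in POSIX, as this userspace sketch shows (compile with -pthread):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        /* pthread_mutex_trylock() returns 0 on success or EBUSY when the
         * lock is already held, unlike the kernel's 1-on-success helper. */
        while (pthread_mutex_trylock(&lock) == EBUSY)
            ;   /* in the driver this busy-wait does udelay(1) */

        puts("lock held");
        pthread_mutex_unlock(&lock);
        return 0;
    }
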
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/staging/rdma/hfi1/device.c index 07c87a87775f..bc26a5392712 100644 --- a/drivers/staging/rdma/hfi1/device.c +++ b/drivers/staging/rdma/hfi1/device.c | |||
@@ -57,11 +57,13 @@ | |||
57 | #include "device.h" | 57 | #include "device.h" |
58 | 58 | ||
59 | static struct class *class; | 59 | static struct class *class; |
60 | static struct class *user_class; | ||
60 | static dev_t hfi1_dev; | 61 | static dev_t hfi1_dev; |
61 | 62 | ||
62 | int hfi1_cdev_init(int minor, const char *name, | 63 | int hfi1_cdev_init(int minor, const char *name, |
63 | const struct file_operations *fops, | 64 | const struct file_operations *fops, |
64 | struct cdev *cdev, struct device **devp) | 65 | struct cdev *cdev, struct device **devp, |
66 | bool user_accessible) | ||
65 | { | 67 | { |
66 | const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor); | 68 | const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor); |
67 | struct device *device = NULL; | 69 | struct device *device = NULL; |
@@ -78,7 +80,11 @@ int hfi1_cdev_init(int minor, const char *name, | |||
78 | goto done; | 80 | goto done; |
79 | } | 81 | } |
80 | 82 | ||
81 | device = device_create(class, NULL, dev, NULL, "%s", name); | 83 | if (user_accessible) |
84 | device = device_create(user_class, NULL, dev, NULL, "%s", name); | ||
85 | else | ||
86 | device = device_create(class, NULL, dev, NULL, "%s", name); | ||
87 | |||
82 | if (!IS_ERR(device)) | 88 | if (!IS_ERR(device)) |
83 | goto done; | 89 | goto done; |
84 | ret = PTR_ERR(device); | 90 | ret = PTR_ERR(device); |
@@ -110,6 +116,26 @@ const char *class_name(void) | |||
110 | return hfi1_class_name; | 116 | return hfi1_class_name; |
111 | } | 117 | } |
112 | 118 | ||
119 | static char *hfi1_devnode(struct device *dev, umode_t *mode) | ||
120 | { | ||
121 | if (mode) | ||
122 | *mode = 0600; | ||
123 | return kasprintf(GFP_KERNEL, "%s", dev_name(dev)); | ||
124 | } | ||
125 | |||
126 | static const char *hfi1_class_name_user = "hfi1_user"; | ||
127 | const char *class_name_user(void) | ||
128 | { | ||
129 | return hfi1_class_name_user; | ||
130 | } | ||
131 | |||
132 | static char *hfi1_user_devnode(struct device *dev, umode_t *mode) | ||
133 | { | ||
134 | if (mode) | ||
135 | *mode = 0666; | ||
136 | return kasprintf(GFP_KERNEL, "%s", dev_name(dev)); | ||
137 | } | ||
138 | |||
113 | int __init dev_init(void) | 139 | int __init dev_init(void) |
114 | { | 140 | { |
115 | int ret; | 141 | int ret; |
@@ -125,7 +151,22 @@ int __init dev_init(void) | |||
125 | ret = PTR_ERR(class); | 151 | ret = PTR_ERR(class); |
126 | pr_err("Could not create device class (err %d)\n", -ret); | 152 | pr_err("Could not create device class (err %d)\n", -ret); |
127 | unregister_chrdev_region(hfi1_dev, HFI1_NMINORS); | 153 | unregister_chrdev_region(hfi1_dev, HFI1_NMINORS); |
154 | goto done; | ||
128 | } | 155 | } |
156 | class->devnode = hfi1_devnode; | ||
157 | |||
158 | user_class = class_create(THIS_MODULE, class_name_user()); | ||
159 | if (IS_ERR(user_class)) { | ||
160 | ret = PTR_ERR(user_class); | ||
161 | pr_err("Could not create device class for user accessible files (err %d)\n", | ||
162 | -ret); | ||
163 | class_destroy(class); | ||
164 | class = NULL; | ||
165 | user_class = NULL; | ||
166 | unregister_chrdev_region(hfi1_dev, HFI1_NMINORS); | ||
167 | goto done; | ||
168 | } | ||
169 | user_class->devnode = hfi1_user_devnode; | ||
129 | 170 | ||
130 | done: | 171 | done: |
131 | return ret; | 172 | return ret; |
@@ -133,10 +174,11 @@ done: | |||
133 | 174 | ||
134 | void dev_cleanup(void) | 175 | void dev_cleanup(void) |
135 | { | 176 | { |
136 | if (class) { | 177 | class_destroy(class); |
137 | class_destroy(class); | 178 | class = NULL; |
138 | class = NULL; | 179 | |
139 | } | 180 | class_destroy(user_class); |
181 | user_class = NULL; | ||
140 | 182 | ||
141 | unregister_chrdev_region(hfi1_dev, HFI1_NMINORS); | 183 | unregister_chrdev_region(hfi1_dev, HFI1_NMINORS); |
142 | } | 184 | } |
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/staging/rdma/hfi1/device.h index 98caecd3d807..2850ff739d81 100644 --- a/drivers/staging/rdma/hfi1/device.h +++ b/drivers/staging/rdma/hfi1/device.h | |||
@@ -52,7 +52,8 @@ | |||
52 | 52 | ||
53 | int hfi1_cdev_init(int minor, const char *name, | 53 | int hfi1_cdev_init(int minor, const char *name, |
54 | const struct file_operations *fops, | 54 | const struct file_operations *fops, |
55 | struct cdev *cdev, struct device **devp); | 55 | struct cdev *cdev, struct device **devp, |
56 | bool user_accessible); | ||
56 | void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp); | 57 | void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp); |
57 | const char *class_name(void); | 58 | const char *class_name(void); |
58 | int __init dev_init(void); | 59 | int __init dev_init(void); |
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index 6777d6b659cf..3e8d5ac4c626 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c | |||
@@ -292,7 +292,7 @@ int hfi1_diag_add(struct hfi1_devdata *dd) | |||
292 | if (atomic_inc_return(&diagpkt_count) == 1) { | 292 | if (atomic_inc_return(&diagpkt_count) == 1) { |
293 | ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name, | 293 | ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name, |
294 | &diagpkt_file_ops, &diagpkt_cdev, | 294 | &diagpkt_file_ops, &diagpkt_cdev, |
295 | &diagpkt_device); | 295 | &diagpkt_device, false); |
296 | } | 296 | } |
297 | 297 | ||
298 | return ret; | 298 | return ret; |
@@ -592,7 +592,8 @@ static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name) | |||
592 | 592 | ||
593 | ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name, | 593 | ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name, |
594 | &snoop_file_ops, | 594 | &snoop_file_ops, |
595 | &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev); | 595 | &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev, |
596 | false); | ||
596 | 597 | ||
597 | if (ret) { | 598 | if (ret) { |
598 | dd_dev_err(dd, "Couldn't create %s device: %d", name, ret); | 599 | dd_dev_err(dd, "Couldn't create %s device: %d", name, ret); |
@@ -1012,11 +1013,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) | |||
1012 | case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA: | 1013 | case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA: |
1013 | memset(&link_info, 0, sizeof(link_info)); | 1014 | memset(&link_info, 0, sizeof(link_info)); |
1014 | 1015 | ||
1015 | ret = copy_from_user(&link_info, | 1016 | if (copy_from_user(&link_info, |
1016 | (struct hfi1_link_info __user *)arg, | 1017 | (struct hfi1_link_info __user *)arg, |
1017 | sizeof(link_info)); | 1018 | sizeof(link_info))) |
1018 | if (ret) | 1019 | ret = -EFAULT; |
1019 | break; | ||
1020 | 1020 | ||
1021 | value = link_info.port_state; | 1021 | value = link_info.port_state; |
1022 | index = link_info.port_number; | 1022 | index = link_info.port_number; |
@@ -1080,9 +1080,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) | |||
1080 | case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA: | 1080 | case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA: |
1081 | if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) { | 1081 | if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) { |
1082 | memset(&link_info, 0, sizeof(link_info)); | 1082 | memset(&link_info, 0, sizeof(link_info)); |
1083 | ret = copy_from_user(&link_info, | 1083 | if (copy_from_user(&link_info, |
1084 | (struct hfi1_link_info __user *)arg, | 1084 | (struct hfi1_link_info __user *)arg, |
1085 | sizeof(link_info)); | 1085 | sizeof(link_info))) |
1086 | ret = -EFAULT; | ||
1086 | index = link_info.port_number; | 1087 | index = link_info.port_number; |
1087 | } else { | 1088 | } else { |
1088 | ret = __get_user(index, (int __user *) arg); | 1089 | ret = __get_user(index, (int __user *) arg); |
@@ -1114,9 +1115,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) | |||
1114 | ppd->link_speed_active; | 1115 | ppd->link_speed_active; |
1115 | link_info.link_width_active = | 1116 | link_info.link_width_active = |
1116 | ppd->link_width_active; | 1117 | ppd->link_width_active; |
1117 | ret = copy_to_user( | 1118 | if (copy_to_user( |
1118 | (struct hfi1_link_info __user *)arg, | 1119 | (struct hfi1_link_info __user *)arg, |
1119 | &link_info, sizeof(link_info)); | 1120 | &link_info, sizeof(link_info))) |
1121 | ret = -EFAULT; | ||
1120 | } else { | 1122 | } else { |
1121 | ret = __put_user(value, (int __user *)arg); | 1123 | ret = __put_user(value, (int __user *)arg); |
1122 | } | 1124 | } |
@@ -1142,10 +1144,9 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) | |||
1142 | snoop_dbg("Setting filter"); | 1144 | snoop_dbg("Setting filter"); |
1143 | /* just copy command structure */ | 1145 | /* just copy command structure */ |
1144 | argp = (unsigned long *)arg; | 1146 | argp = (unsigned long *)arg; |
1145 | ret = copy_from_user(&filter_cmd, (void __user *)argp, | 1147 | if (copy_from_user(&filter_cmd, (void __user *)argp, |
1146 | sizeof(filter_cmd)); | 1148 | sizeof(filter_cmd))) { |
1147 | if (ret < 0) { | 1149 | ret = -EFAULT; |
1148 | pr_alert("Error copying filter command\n"); | ||
1149 | break; | 1150 | break; |
1150 | } | 1151 | } |
1151 | if (filter_cmd.opcode >= HFI1_MAX_FILTERS) { | 1152 | if (filter_cmd.opcode >= HFI1_MAX_FILTERS) { |
@@ -1167,12 +1168,11 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) | |||
1167 | break; | 1168 | break; |
1168 | } | 1169 | } |
1169 | /* copy remaining data from userspace */ | 1170 | /* copy remaining data from userspace */ |
1170 | ret = copy_from_user((u8 *)filter_value, | 1171 | if (copy_from_user((u8 *)filter_value, |
1171 | (void __user *)filter_cmd.value_ptr, | 1172 | (void __user *)filter_cmd.value_ptr, |
1172 | filter_cmd.length); | 1173 | filter_cmd.length)) { |
1173 | if (ret < 0) { | ||
1174 | kfree(filter_value); | 1174 | kfree(filter_value); |
1175 | pr_alert("Error copying filter data\n"); | 1175 | ret = -EFAULT; |
1176 | break; | 1176 | break; |
1177 | } | 1177 | } |
1178 | /* Drain packets first */ | 1178 | /* Drain packets first */ |
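[Note on the diag.c hunks above: copy_from_user()/copy_to_user() return the number of bytes that could not be copied (0 on success), never a negative errno, so the old "ret < 0" tests could never fire and a short copy was silently treated as success. The idiomatic check is the one the patch switches to. A small sketch of the convention; example_ioctl_copy and struct example_args are placeholders, not hfi1 code.]

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct example_args {
            u32 opcode;
            u32 length;
    };

    static long example_ioctl_copy(void __user *uarg)
    {
            struct example_args args;

            /* returns bytes NOT copied: non-zero means fault, map it to -EFAULT */
            if (copy_from_user(&args, uarg, sizeof(args)))
                    return -EFAULT;

            /* ... validate and act on args ... */

            if (copy_to_user(uarg, &args, sizeof(args)))
                    return -EFAULT;
            return 0;
    }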
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 469861750b76..72d38500d8ce 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c | |||
@@ -1181,6 +1181,7 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) | |||
1181 | struct hfi1_filedata *fd = fp->private_data; | 1181 | struct hfi1_filedata *fd = fp->private_data; |
1182 | int ret = 0; | 1182 | int ret = 0; |
1183 | 1183 | ||
1184 | memset(&cinfo, 0, sizeof(cinfo)); | ||
1184 | ret = hfi1_get_base_kinfo(uctxt, &cinfo); | 1185 | ret = hfi1_get_base_kinfo(uctxt, &cinfo); |
1185 | if (ret < 0) | 1186 | if (ret < 0) |
1186 | goto done; | 1187 | goto done; |
@@ -2089,14 +2090,16 @@ static int user_add(struct hfi1_devdata *dd) | |||
2089 | 2090 | ||
2090 | if (atomic_inc_return(&user_count) == 1) { | 2091 | if (atomic_inc_return(&user_count) == 1) { |
2091 | ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops, | 2092 | ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops, |
2092 | &wildcard_cdev, &wildcard_device); | 2093 | &wildcard_cdev, &wildcard_device, |
2094 | true); | ||
2093 | if (ret) | 2095 | if (ret) |
2094 | goto done; | 2096 | goto done; |
2095 | } | 2097 | } |
2096 | 2098 | ||
2097 | snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); | 2099 | snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); |
2098 | ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops, | 2100 | ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops, |
2099 | &dd->user_cdev, &dd->user_device); | 2101 | &dd->user_cdev, &dd->user_device, |
2102 | true); | ||
2100 | if (ret) | 2103 | if (ret) |
2101 | goto done; | 2104 | goto done; |
2102 | 2105 | ||
@@ -2104,7 +2107,8 @@ static int user_add(struct hfi1_devdata *dd) | |||
2104 | snprintf(name, sizeof(name), | 2107 | snprintf(name, sizeof(name), |
2105 | "%s_ui%d", class_name(), dd->unit); | 2108 | "%s_ui%d", class_name(), dd->unit); |
2106 | ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops, | 2109 | ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops, |
2107 | &dd->ui_cdev, &dd->ui_device); | 2110 | &dd->ui_cdev, &dd->ui_device, |
2111 | false); | ||
2108 | if (ret) | 2112 | if (ret) |
2109 | goto done; | 2113 | goto done; |
2110 | } | 2114 | } |
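[Note on the get_ctxt_info() hunk above: the stack copy of cinfo is now zeroed before being filled in. Structures copied to user space can contain compiler-inserted padding, and without the memset those padding bytes hold stale stack contents, which is a kernel information leak. A minimal sketch of the pattern; struct example_info and its field layout are illustrative only.]

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct example_info {
            u16 unit;            /* padding bytes may follow this member */
            u32 credits;
    };

    static int example_get_info(void __user *ubase)
    {
            struct example_info info;

            memset(&info, 0, sizeof(info));   /* clears the padding too */
            info.unit = 3;
            info.credits = 64;

            if (copy_to_user(ubase, &info, sizeof(info)))
                    return -EFAULT;
            return 0;
    }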
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 37269eb90c34..b2c1b72d38ce 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c | |||
@@ -1717,9 +1717,9 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, | |||
1717 | psi->port_states.portphysstate_portstate = | 1717 | psi->port_states.portphysstate_portstate = |
1718 | (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf); | 1718 | (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf); |
1719 | psi->link_width_downgrade_tx_active = | 1719 | psi->link_width_downgrade_tx_active = |
1720 | ppd->link_width_downgrade_tx_active; | 1720 | cpu_to_be16(ppd->link_width_downgrade_tx_active); |
1721 | psi->link_width_downgrade_rx_active = | 1721 | psi->link_width_downgrade_rx_active = |
1722 | ppd->link_width_downgrade_rx_active; | 1722 | cpu_to_be16(ppd->link_width_downgrade_rx_active); |
1723 | if (resp_len) | 1723 | if (resp_len) |
1724 | *resp_len += sizeof(struct opa_port_state_info); | 1724 | *resp_len += sizeof(struct opa_port_state_info); |
1725 | 1725 | ||
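[Note on the mad.c fix above: the OPA management datagram fields are carried in big-endian byte order, so the link-width-downgrade values must go through cpu_to_be16(); on a little-endian host a bare assignment would put the bytes on the wire swapped. A small sketch of the convention, assuming an on-wire structure declared with __be16 members as sparse expects.]

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct example_port_state {
            __be16 link_width_downgrade_tx_active;   /* wire format: big endian */
            __be16 link_width_downgrade_rx_active;
    };

    static void example_fill(struct example_port_state *psi, u16 tx, u16 rx)
    {
            /* convert host order to wire order; sparse warns on a bare store */
            psi->link_width_downgrade_tx_active = cpu_to_be16(tx);
            psi->link_width_downgrade_rx_active = cpu_to_be16(rx);
    }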
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index a8c903caecce..aecd1a74741c 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c | |||
@@ -737,7 +737,7 @@ u16 sdma_get_descq_cnt(void) | |||
737 | */ | 737 | */ |
738 | if (!is_power_of_2(count)) | 738 | if (!is_power_of_2(count)) |
739 | return SDMA_DESCQ_CNT; | 739 | return SDMA_DESCQ_CNT; |
740 | if (count < 64 && count > 32768) | 740 | if (count < 64 || count > 32768) |
741 | return SDMA_DESCQ_CNT; | 741 | return SDMA_DESCQ_CNT; |
742 | return count; | 742 | return count; |
743 | } | 743 | } |
@@ -1848,7 +1848,7 @@ static void dump_sdma_state(struct sdma_engine *sde) | |||
1848 | dd_dev_err(sde->dd, | 1848 | dd_dev_err(sde->dd, |
1849 | "\taidx: %u amode: %u alen: %u\n", | 1849 | "\taidx: %u amode: %u alen: %u\n", |
1850 | (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) | 1850 | (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) |
1851 | >> SDMA_DESC1_HEADER_INDEX_MASK), | 1851 | >> SDMA_DESC1_HEADER_INDEX_SHIFT), |
1852 | (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) | 1852 | (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) |
1853 | >> SDMA_DESC1_HEADER_MODE_SHIFT), | 1853 | >> SDMA_DESC1_HEADER_MODE_SHIFT), |
1854 | (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK) | 1854 | (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK) |
@@ -1926,7 +1926,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) | |||
1926 | if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) | 1926 | if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) |
1927 | seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", | 1927 | seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", |
1928 | (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) | 1928 | (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) |
1929 | >> SDMA_DESC1_HEADER_INDEX_MASK), | 1929 | >> SDMA_DESC1_HEADER_INDEX_SHIFT), |
1930 | (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) | 1930 | (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) |
1931 | >> SDMA_DESC1_HEADER_MODE_SHIFT)); | 1931 | >> SDMA_DESC1_HEADER_MODE_SHIFT)); |
1932 | head = (head + 1) & sde->sdma_mask; | 1932 | head = (head + 1) & sde->sdma_mask; |
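[Note on the sdma.c hunks above: the descriptor-queue size check used "count < 64 && count > 32768", a condition no value can satisfy, so out-of-range module parameters slipped through; the corrected "||" rejects anything outside [64, 32768]. The debug dumps also shifted by ..._INDEX_MASK instead of ..._INDEX_SHIFT and so printed the wrong field. A sketch of the intended validation; SDMA_DESCQ_CNT here is an illustrative stand-in for the driver default.]

    #include <linux/log2.h>
    #include <linux/types.h>

    #define SDMA_DESCQ_CNT 2048   /* illustrative default */

    static u16 example_get_descq_cnt(unsigned int count)
    {
            if (!count)
                    return SDMA_DESCQ_CNT;
            /* must be a power of two within the hardware-supported range */
            if (!is_power_of_2(count))
                    return SDMA_DESCQ_CNT;
            if (count < 64 || count > 32768)
                    return SDMA_DESCQ_CNT;
            return count;
    }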
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index 1e613fcd8f4c..496086903891 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h | |||
@@ -109,53 +109,53 @@ | |||
109 | /* | 109 | /* |
110 | * Bits defined in the send DMA descriptor. | 110 | * Bits defined in the send DMA descriptor. |
111 | */ | 111 | */ |
112 | #define SDMA_DESC0_FIRST_DESC_FLAG (1ULL<<63) | 112 | #define SDMA_DESC0_FIRST_DESC_FLAG (1ULL << 63) |
113 | #define SDMA_DESC0_LAST_DESC_FLAG (1ULL<<62) | 113 | #define SDMA_DESC0_LAST_DESC_FLAG (1ULL << 62) |
114 | #define SDMA_DESC0_BYTE_COUNT_SHIFT 48 | 114 | #define SDMA_DESC0_BYTE_COUNT_SHIFT 48 |
115 | #define SDMA_DESC0_BYTE_COUNT_WIDTH 14 | 115 | #define SDMA_DESC0_BYTE_COUNT_WIDTH 14 |
116 | #define SDMA_DESC0_BYTE_COUNT_MASK \ | 116 | #define SDMA_DESC0_BYTE_COUNT_MASK \ |
117 | ((1ULL<<SDMA_DESC0_BYTE_COUNT_WIDTH)-1ULL) | 117 | ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1) |
118 | #define SDMA_DESC0_BYTE_COUNT_SMASK \ | 118 | #define SDMA_DESC0_BYTE_COUNT_SMASK \ |
119 | (SDMA_DESC0_BYTE_COUNT_MASK<<SDMA_DESC0_BYTE_COUNT_SHIFT) | 119 | (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT) |
120 | #define SDMA_DESC0_PHY_ADDR_SHIFT 0 | 120 | #define SDMA_DESC0_PHY_ADDR_SHIFT 0 |
121 | #define SDMA_DESC0_PHY_ADDR_WIDTH 48 | 121 | #define SDMA_DESC0_PHY_ADDR_WIDTH 48 |
122 | #define SDMA_DESC0_PHY_ADDR_MASK \ | 122 | #define SDMA_DESC0_PHY_ADDR_MASK \ |
123 | ((1ULL<<SDMA_DESC0_PHY_ADDR_WIDTH)-1ULL) | 123 | ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1) |
124 | #define SDMA_DESC0_PHY_ADDR_SMASK \ | 124 | #define SDMA_DESC0_PHY_ADDR_SMASK \ |
125 | (SDMA_DESC0_PHY_ADDR_MASK<<SDMA_DESC0_PHY_ADDR_SHIFT) | 125 | (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT) |
126 | 126 | ||
127 | #define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32 | 127 | #define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32 |
128 | #define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32 | 128 | #define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32 |
129 | #define SDMA_DESC1_HEADER_UPDATE1_MASK \ | 129 | #define SDMA_DESC1_HEADER_UPDATE1_MASK \ |
130 | ((1ULL<<SDMA_DESC1_HEADER_UPDATE1_WIDTH)-1ULL) | 130 | ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1) |
131 | #define SDMA_DESC1_HEADER_UPDATE1_SMASK \ | 131 | #define SDMA_DESC1_HEADER_UPDATE1_SMASK \ |
132 | (SDMA_DESC1_HEADER_UPDATE1_MASK<<SDMA_DESC1_HEADER_UPDATE1_SHIFT) | 132 | (SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT) |
133 | #define SDMA_DESC1_HEADER_MODE_SHIFT 13 | 133 | #define SDMA_DESC1_HEADER_MODE_SHIFT 13 |
134 | #define SDMA_DESC1_HEADER_MODE_WIDTH 3 | 134 | #define SDMA_DESC1_HEADER_MODE_WIDTH 3 |
135 | #define SDMA_DESC1_HEADER_MODE_MASK \ | 135 | #define SDMA_DESC1_HEADER_MODE_MASK \ |
136 | ((1ULL<<SDMA_DESC1_HEADER_MODE_WIDTH)-1ULL) | 136 | ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1) |
137 | #define SDMA_DESC1_HEADER_MODE_SMASK \ | 137 | #define SDMA_DESC1_HEADER_MODE_SMASK \ |
138 | (SDMA_DESC1_HEADER_MODE_MASK<<SDMA_DESC1_HEADER_MODE_SHIFT) | 138 | (SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT) |
139 | #define SDMA_DESC1_HEADER_INDEX_SHIFT 8 | 139 | #define SDMA_DESC1_HEADER_INDEX_SHIFT 8 |
140 | #define SDMA_DESC1_HEADER_INDEX_WIDTH 5 | 140 | #define SDMA_DESC1_HEADER_INDEX_WIDTH 5 |
141 | #define SDMA_DESC1_HEADER_INDEX_MASK \ | 141 | #define SDMA_DESC1_HEADER_INDEX_MASK \ |
142 | ((1ULL<<SDMA_DESC1_HEADER_INDEX_WIDTH)-1ULL) | 142 | ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1) |
143 | #define SDMA_DESC1_HEADER_INDEX_SMASK \ | 143 | #define SDMA_DESC1_HEADER_INDEX_SMASK \ |
144 | (SDMA_DESC1_HEADER_INDEX_MASK<<SDMA_DESC1_HEADER_INDEX_SHIFT) | 144 | (SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT) |
145 | #define SDMA_DESC1_HEADER_DWS_SHIFT 4 | 145 | #define SDMA_DESC1_HEADER_DWS_SHIFT 4 |
146 | #define SDMA_DESC1_HEADER_DWS_WIDTH 4 | 146 | #define SDMA_DESC1_HEADER_DWS_WIDTH 4 |
147 | #define SDMA_DESC1_HEADER_DWS_MASK \ | 147 | #define SDMA_DESC1_HEADER_DWS_MASK \ |
148 | ((1ULL<<SDMA_DESC1_HEADER_DWS_WIDTH)-1ULL) | 148 | ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1) |
149 | #define SDMA_DESC1_HEADER_DWS_SMASK \ | 149 | #define SDMA_DESC1_HEADER_DWS_SMASK \ |
150 | (SDMA_DESC1_HEADER_DWS_MASK<<SDMA_DESC1_HEADER_DWS_SHIFT) | 150 | (SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT) |
151 | #define SDMA_DESC1_GENERATION_SHIFT 2 | 151 | #define SDMA_DESC1_GENERATION_SHIFT 2 |
152 | #define SDMA_DESC1_GENERATION_WIDTH 2 | 152 | #define SDMA_DESC1_GENERATION_WIDTH 2 |
153 | #define SDMA_DESC1_GENERATION_MASK \ | 153 | #define SDMA_DESC1_GENERATION_MASK \ |
154 | ((1ULL<<SDMA_DESC1_GENERATION_WIDTH)-1ULL) | 154 | ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1) |
155 | #define SDMA_DESC1_GENERATION_SMASK \ | 155 | #define SDMA_DESC1_GENERATION_SMASK \ |
156 | (SDMA_DESC1_GENERATION_MASK<<SDMA_DESC1_GENERATION_SHIFT) | 156 | (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT) |
157 | #define SDMA_DESC1_INT_REQ_FLAG (1ULL<<1) | 157 | #define SDMA_DESC1_INT_REQ_FLAG (1ULL << 1) |
158 | #define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL<<0) | 158 | #define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL << 0) |
159 | 159 | ||
160 | enum sdma_states { | 160 | enum sdma_states { |
161 | sdma_state_s00_hw_down, | 161 | sdma_state_s00_hw_down, |
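[Note on the sdma.h hunk above: the change is purely stylistic (spaces around << and dropping the redundant ULL suffix on the "- 1"), but the macros show the usual kernel field-encoding scheme: a SHIFT, a WIDTH-derived MASK of right-aligned bits, and an SMASK (shifted mask) for isolating the field inside the descriptor word. A sketch of how such a field is extracted and inserted; the names are shortened and not part of hfi1.]

    #include <linux/types.h>

    #define HDR_INDEX_SHIFT 8
    #define HDR_INDEX_WIDTH 5
    #define HDR_INDEX_MASK  ((1ULL << HDR_INDEX_WIDTH) - 1)
    #define HDR_INDEX_SMASK (HDR_INDEX_MASK << HDR_INDEX_SHIFT)

    static inline u8 hdr_index_get(u64 desc1)
    {
            /* mask first, then shift down by the SHIFT (not by the MASK) */
            return (u8)((desc1 & HDR_INDEX_SMASK) >> HDR_INDEX_SHIFT);
    }

    static inline u64 hdr_index_set(u64 desc1, u8 idx)
    {
            desc1 &= ~HDR_INDEX_SMASK;
            desc1 |= ((u64)idx & HDR_INDEX_MASK) << HDR_INDEX_SHIFT;
            return desc1;
    }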
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 53ac21431542..41bb59eb001c 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c | |||
@@ -749,11 +749,13 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, | |||
749 | struct verbs_txreq *tx; | 749 | struct verbs_txreq *tx; |
750 | 750 | ||
751 | tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); | 751 | tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); |
752 | if (!tx) | 752 | if (!tx) { |
753 | /* call slow path to get the lock */ | 753 | /* call slow path to get the lock */ |
754 | tx = __get_txreq(dev, qp); | 754 | tx = __get_txreq(dev, qp); |
755 | if (tx) | 755 | if (IS_ERR(tx)) |
756 | tx->qp = qp; | 756 | return tx; |
757 | } | ||
758 | tx->qp = qp; | ||
757 | return tx; | 759 | return tx; |
758 | } | 760 | } |
759 | 761 | ||
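[Note on the verbs.c hunk above: get_txreq() tested the slow-path result with "if (tx)", but __get_txreq() reports failure with an ERR_PTR-encoded pointer, which is non-NULL, so callers went on to dereference it. The fix checks IS_ERR() and returns the error pointer unchanged. A small sketch of the error-pointer convention; example_alloc is hypothetical.]

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct txreq { int qp; };

    static struct txreq *example_alloc(gfp_t gfp)
    {
            struct txreq *tx = kmalloc(sizeof(*tx), gfp);

            if (!tx)
                    return ERR_PTR(-EBUSY);   /* encode the errno in the pointer */
            return tx;
    }

    static int example_use(void)
    {
            struct txreq *tx = example_alloc(GFP_ATOMIC);

            if (IS_ERR(tx))                   /* a NULL test would miss this */
                    return PTR_ERR(tx);
            tx->qp = 1;
            kfree(tx);
            return 0;
    }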
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c index 4299cf45f947..5e1f16c36b49 100644 --- a/drivers/staging/speakup/fakekey.c +++ b/drivers/staging/speakup/fakekey.c | |||
@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void) | |||
81 | __this_cpu_write(reporting_keystroke, true); | 81 | __this_cpu_write(reporting_keystroke, true); |
82 | input_report_key(virt_keyboard, KEY_DOWN, PRESSED); | 82 | input_report_key(virt_keyboard, KEY_DOWN, PRESSED); |
83 | input_report_key(virt_keyboard, KEY_DOWN, RELEASED); | 83 | input_report_key(virt_keyboard, KEY_DOWN, RELEASED); |
84 | input_sync(virt_keyboard); | ||
84 | __this_cpu_write(reporting_keystroke, false); | 85 | __this_cpu_write(reporting_keystroke, false); |
85 | 86 | ||
86 | /* reenable preemption */ | 87 | /* reenable preemption */ |
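[Note on the speakup fakekey fix above: input events are batched until a sync event is emitted, so without the added input_sync() the fake press/release pair can sit in the evdev queue instead of being delivered promptly. Minimal sketch of the usual report-then-sync sequence; example_press_down is a placeholder.]

    #include <linux/input.h>

    static void example_press_down(struct input_dev *kbd)
    {
            input_report_key(kbd, KEY_DOWN, 1);   /* press */
            input_report_key(kbd, KEY_DOWN, 0);   /* release */
            input_sync(kbd);                      /* flush the event batch */
    }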
diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/staging/unisys/visorbus/Makefile index fa27ee5f336c..fc790e7592fc 100644 --- a/drivers/staging/unisys/visorbus/Makefile +++ b/drivers/staging/unisys/visorbus/Makefile | |||
@@ -10,4 +10,3 @@ visorbus-y += visorchipset.o | |||
10 | visorbus-y += periodic_work.o | 10 | visorbus-y += periodic_work.o |
11 | 11 | ||
12 | ccflags-y += -Idrivers/staging/unisys/include | 12 | ccflags-y += -Idrivers/staging/unisys/include |
13 | ccflags-y += -Idrivers/staging/unisys/visorutil | ||
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c index 2309f5f2b238..a272b48bab28 100644 --- a/drivers/staging/unisys/visorbus/visorbus_main.c +++ b/drivers/staging/unisys/visorbus/visorbus_main.c | |||
@@ -37,6 +37,8 @@ static int visorbus_debugref; | |||
37 | #define POLLJIFFIES_TESTWORK 100 | 37 | #define POLLJIFFIES_TESTWORK 100 |
38 | #define POLLJIFFIES_NORMALCHANNEL 10 | 38 | #define POLLJIFFIES_NORMALCHANNEL 10 |
39 | 39 | ||
40 | static int busreg_rc = -ENODEV; /* stores the result from bus registration */ | ||
41 | |||
40 | static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env); | 42 | static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env); |
41 | static int visorbus_match(struct device *xdev, struct device_driver *xdrv); | 43 | static int visorbus_match(struct device *xdev, struct device_driver *xdrv); |
42 | static void fix_vbus_dev_info(struct visor_device *visordev); | 44 | static void fix_vbus_dev_info(struct visor_device *visordev); |
@@ -863,6 +865,9 @@ int visorbus_register_visor_driver(struct visor_driver *drv) | |||
863 | { | 865 | { |
864 | int rc = 0; | 866 | int rc = 0; |
865 | 867 | ||
868 | if (busreg_rc < 0) | ||
869 | return -ENODEV; /*can't register on a nonexistent bus*/ | ||
870 | |||
866 | drv->driver.name = drv->name; | 871 | drv->driver.name = drv->name; |
867 | drv->driver.bus = &visorbus_type; | 872 | drv->driver.bus = &visorbus_type; |
868 | drv->driver.probe = visordriver_probe_device; | 873 | drv->driver.probe = visordriver_probe_device; |
@@ -885,6 +890,8 @@ int visorbus_register_visor_driver(struct visor_driver *drv) | |||
885 | if (rc < 0) | 890 | if (rc < 0) |
886 | return rc; | 891 | return rc; |
887 | rc = register_driver_attributes(drv); | 892 | rc = register_driver_attributes(drv); |
893 | if (rc < 0) | ||
894 | driver_unregister(&drv->driver); | ||
888 | return rc; | 895 | return rc; |
889 | } | 896 | } |
890 | EXPORT_SYMBOL_GPL(visorbus_register_visor_driver); | 897 | EXPORT_SYMBOL_GPL(visorbus_register_visor_driver); |
@@ -1260,10 +1267,8 @@ remove_bus_instance(struct visor_device *dev) | |||
1260 | static int | 1267 | static int |
1261 | create_bus_type(void) | 1268 | create_bus_type(void) |
1262 | { | 1269 | { |
1263 | int rc = 0; | 1270 | busreg_rc = bus_register(&visorbus_type); |
1264 | 1271 | return busreg_rc; | |
1265 | rc = bus_register(&visorbus_type); | ||
1266 | return rc; | ||
1267 | } | 1272 | } |
1268 | 1273 | ||
1269 | /** Remove the one-and-only one instance of the visor bus type (visorbus_type). | 1274 | /** Remove the one-and-only one instance of the visor bus type (visorbus_type). |
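[Note on the visorbus_main.c hunks above: the result of bus_register() is remembered in busreg_rc so visorbus_register_visor_driver() can refuse to run when the bus never came up, since registering a driver on an unregistered bus would crash; the attribute-registration failure path now also unwinds driver_register(). A sketch of the guard, with example_* standing in for the visorbus symbols.]

    #include <linux/device.h>
    #include <linux/errno.h>

    static struct bus_type example_bus = { .name = "example" };
    static int example_busreg_rc = -ENODEV;   /* result of bus registration */

    static int example_create_bus(void)      /* called once at module init */
    {
            example_busreg_rc = bus_register(&example_bus);
            return example_busreg_rc;
    }

    int example_register_driver(struct device_driver *drv)
    {
            if (example_busreg_rc < 0)
                    return -ENODEV;           /* can't register on a dead bus */
            drv->bus = &example_bus;
            return driver_register(drv);
    }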
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index 8c9da7ea7845..9d3c1e282062 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c | |||
@@ -1189,16 +1189,16 @@ visornic_rx(struct uiscmdrsp *cmdrsp) | |||
1189 | spin_lock_irqsave(&devdata->priv_lock, flags); | 1189 | spin_lock_irqsave(&devdata->priv_lock, flags); |
1190 | atomic_dec(&devdata->num_rcvbuf_in_iovm); | 1190 | atomic_dec(&devdata->num_rcvbuf_in_iovm); |
1191 | 1191 | ||
1192 | /* update rcv stats - call it with priv_lock held */ | ||
1193 | devdata->net_stats.rx_packets++; | ||
1194 | devdata->net_stats.rx_bytes = skb->len; | ||
1195 | |||
1196 | /* set length to how much was ACTUALLY received - | 1192 | /* set length to how much was ACTUALLY received - |
1197 | * NOTE: rcv_done_len includes actual length of data rcvd | 1193 | * NOTE: rcv_done_len includes actual length of data rcvd |
1198 | * including ethhdr | 1194 | * including ethhdr |
1199 | */ | 1195 | */ |
1200 | skb->len = cmdrsp->net.rcv.rcv_done_len; | 1196 | skb->len = cmdrsp->net.rcv.rcv_done_len; |
1201 | 1197 | ||
1198 | /* update rcv stats - call it with priv_lock held */ | ||
1199 | devdata->net_stats.rx_packets++; | ||
1200 | devdata->net_stats.rx_bytes += skb->len; | ||
1201 | |||
1202 | /* test enabled while holding lock */ | 1202 | /* test enabled while holding lock */ |
1203 | if (!(devdata->enabled && devdata->enab_dis_acked)) { | 1203 | if (!(devdata->enabled && devdata->enab_dis_acked)) { |
1204 | /* don't process it unless we're in enable mode and until | 1204 | /* don't process it unless we're in enable mode and until |
@@ -1924,13 +1924,16 @@ static int visornic_probe(struct visor_device *dev) | |||
1924 | "%s debugfs_create_dir %s failed\n", | 1924 | "%s debugfs_create_dir %s failed\n", |
1925 | __func__, netdev->name); | 1925 | __func__, netdev->name); |
1926 | err = -ENOMEM; | 1926 | err = -ENOMEM; |
1927 | goto cleanup_xmit_cmdrsp; | 1927 | goto cleanup_register_netdev; |
1928 | } | 1928 | } |
1929 | 1929 | ||
1930 | dev_info(&dev->device, "%s success netdev=%s\n", | 1930 | dev_info(&dev->device, "%s success netdev=%s\n", |
1931 | __func__, netdev->name); | 1931 | __func__, netdev->name); |
1932 | return 0; | 1932 | return 0; |
1933 | 1933 | ||
1934 | cleanup_register_netdev: | ||
1935 | unregister_netdev(netdev); | ||
1936 | |||
1934 | cleanup_napi_add: | 1937 | cleanup_napi_add: |
1935 | del_timer_sync(&devdata->irq_poll_timer); | 1938 | del_timer_sync(&devdata->irq_poll_timer); |
1936 | netif_napi_del(&devdata->napi); | 1939 | netif_napi_del(&devdata->napi); |
@@ -2128,8 +2131,9 @@ static int visornic_init(void) | |||
2128 | if (!dev_num_pool) | 2131 | if (!dev_num_pool) |
2129 | goto cleanup_workqueue; | 2132 | goto cleanup_workqueue; |
2130 | 2133 | ||
2131 | visorbus_register_visor_driver(&visornic_driver); | 2134 | err = visorbus_register_visor_driver(&visornic_driver); |
2132 | return 0; | 2135 | if (!err) |
2136 | return 0; | ||
2133 | 2137 | ||
2134 | cleanup_workqueue: | 2138 | cleanup_workqueue: |
2135 | if (visornic_timeout_reset_workqueue) { | 2139 | if (visornic_timeout_reset_workqueue) { |
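[Note on the visornic hunks above: visornic_probe() gains a cleanup_register_netdev label so a debugfs failure that happens after register_netdev() unwinds the netdev registration instead of leaking it, visornic_init() stops ignoring the return value of visorbus_register_visor_driver(), and the receive stats now add "+=" to rx_bytes only after skb->len has been set to the real received length. A condensed sketch of the goto-ladder unwind style the probe path follows; the example_setup/teardown helpers are placeholders.]

    int example_setup_a(void);
    int example_setup_b(void);                /* e.g. register_netdev() */
    int example_setup_c(void);                /* e.g. debugfs_create_dir() */
    void example_teardown_a(void);
    void example_teardown_b(void);            /* e.g. unregister_netdev() */

    static int example_probe(void)
    {
            int err;

            err = example_setup_a();
            if (err)
                    return err;

            err = example_setup_b();
            if (err)
                    goto cleanup_a;

            err = example_setup_c();
            if (err)
                    goto cleanup_b;           /* undo b, then fall through to a */

            return 0;

    cleanup_b:
            example_teardown_b();
    cleanup_a:
            example_teardown_a();
            return err;
    }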
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index e8a52f7d6204..51d1734d5390 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c | |||
@@ -407,6 +407,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) | |||
407 | TYPERANGE_UTF8, USE_INITIAL_ONLY); | 407 | TYPERANGE_UTF8, USE_INITIAL_ONLY); |
408 | if (!param) | 408 | if (!param) |
409 | goto out; | 409 | goto out; |
410 | |||
410 | /* | 411 | /* |
411 | * Extra parameters for ISER from RFC-5046 | 412 | * Extra parameters for ISER from RFC-5046 |
412 | */ | 413 | */ |
@@ -496,9 +497,9 @@ int iscsi_set_keys_to_negotiate( | |||
496 | } else if (!strcmp(param->name, SESSIONTYPE)) { | 497 | } else if (!strcmp(param->name, SESSIONTYPE)) { |
497 | SET_PSTATE_NEGOTIATE(param); | 498 | SET_PSTATE_NEGOTIATE(param); |
498 | } else if (!strcmp(param->name, IFMARKER)) { | 499 | } else if (!strcmp(param->name, IFMARKER)) { |
499 | SET_PSTATE_NEGOTIATE(param); | 500 | SET_PSTATE_REJECT(param); |
500 | } else if (!strcmp(param->name, OFMARKER)) { | 501 | } else if (!strcmp(param->name, OFMARKER)) { |
501 | SET_PSTATE_NEGOTIATE(param); | 502 | SET_PSTATE_REJECT(param); |
502 | } else if (!strcmp(param->name, IFMARKINT)) { | 503 | } else if (!strcmp(param->name, IFMARKINT)) { |
503 | SET_PSTATE_REJECT(param); | 504 | SET_PSTATE_REJECT(param); |
504 | } else if (!strcmp(param->name, OFMARKINT)) { | 505 | } else if (!strcmp(param->name, OFMARKINT)) { |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index dcc424ac35d4..88ea4e4f124b 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -62,22 +62,13 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) | |||
62 | struct se_session *se_sess = se_cmd->se_sess; | 62 | struct se_session *se_sess = se_cmd->se_sess; |
63 | struct se_node_acl *nacl = se_sess->se_node_acl; | 63 | struct se_node_acl *nacl = se_sess->se_node_acl; |
64 | struct se_dev_entry *deve; | 64 | struct se_dev_entry *deve; |
65 | sense_reason_t ret = TCM_NO_SENSE; | ||
65 | 66 | ||
66 | rcu_read_lock(); | 67 | rcu_read_lock(); |
67 | deve = target_nacl_find_deve(nacl, unpacked_lun); | 68 | deve = target_nacl_find_deve(nacl, unpacked_lun); |
68 | if (deve) { | 69 | if (deve) { |
69 | atomic_long_inc(&deve->total_cmds); | 70 | atomic_long_inc(&deve->total_cmds); |
70 | 71 | ||
71 | if ((se_cmd->data_direction == DMA_TO_DEVICE) && | ||
72 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { | ||
73 | pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" | ||
74 | " Access for 0x%08llx\n", | ||
75 | se_cmd->se_tfo->get_fabric_name(), | ||
76 | unpacked_lun); | ||
77 | rcu_read_unlock(); | ||
78 | return TCM_WRITE_PROTECTED; | ||
79 | } | ||
80 | |||
81 | if (se_cmd->data_direction == DMA_TO_DEVICE) | 72 | if (se_cmd->data_direction == DMA_TO_DEVICE) |
82 | atomic_long_add(se_cmd->data_length, | 73 | atomic_long_add(se_cmd->data_length, |
83 | &deve->write_bytes); | 74 | &deve->write_bytes); |
@@ -93,6 +84,17 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) | |||
93 | 84 | ||
94 | percpu_ref_get(&se_lun->lun_ref); | 85 | percpu_ref_get(&se_lun->lun_ref); |
95 | se_cmd->lun_ref_active = true; | 86 | se_cmd->lun_ref_active = true; |
87 | |||
88 | if ((se_cmd->data_direction == DMA_TO_DEVICE) && | ||
89 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { | ||
90 | pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" | ||
91 | " Access for 0x%08llx\n", | ||
92 | se_cmd->se_tfo->get_fabric_name(), | ||
93 | unpacked_lun); | ||
94 | rcu_read_unlock(); | ||
95 | ret = TCM_WRITE_PROTECTED; | ||
96 | goto ref_dev; | ||
97 | } | ||
96 | } | 98 | } |
97 | rcu_read_unlock(); | 99 | rcu_read_unlock(); |
98 | 100 | ||
@@ -109,12 +111,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) | |||
109 | unpacked_lun); | 111 | unpacked_lun); |
110 | return TCM_NON_EXISTENT_LUN; | 112 | return TCM_NON_EXISTENT_LUN; |
111 | } | 113 | } |
112 | /* | ||
113 | * Force WRITE PROTECT for virtual LUN 0 | ||
114 | */ | ||
115 | if ((se_cmd->data_direction != DMA_FROM_DEVICE) && | ||
116 | (se_cmd->data_direction != DMA_NONE)) | ||
117 | return TCM_WRITE_PROTECTED; | ||
118 | 114 | ||
119 | se_lun = se_sess->se_tpg->tpg_virt_lun0; | 115 | se_lun = se_sess->se_tpg->tpg_virt_lun0; |
120 | se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; | 116 | se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; |
@@ -123,6 +119,15 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) | |||
123 | 119 | ||
124 | percpu_ref_get(&se_lun->lun_ref); | 120 | percpu_ref_get(&se_lun->lun_ref); |
125 | se_cmd->lun_ref_active = true; | 121 | se_cmd->lun_ref_active = true; |
122 | |||
123 | /* | ||
124 | * Force WRITE PROTECT for virtual LUN 0 | ||
125 | */ | ||
126 | if ((se_cmd->data_direction != DMA_FROM_DEVICE) && | ||
127 | (se_cmd->data_direction != DMA_NONE)) { | ||
128 | ret = TCM_WRITE_PROTECTED; | ||
129 | goto ref_dev; | ||
130 | } | ||
126 | } | 131 | } |
127 | /* | 132 | /* |
128 | * RCU reference protected by percpu se_lun->lun_ref taken above that | 133 | * RCU reference protected by percpu se_lun->lun_ref taken above that |
@@ -130,6 +135,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) | |||
130 | * pointer can be kfree_rcu() by the final se_lun->lun_group put via | 135 | * pointer can be kfree_rcu() by the final se_lun->lun_group put via |
131 | * target_core_fabric_configfs.c:target_fabric_port_release | 136 | * target_core_fabric_configfs.c:target_fabric_port_release |
132 | */ | 137 | */ |
138 | ref_dev: | ||
133 | se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); | 139 | se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); |
134 | atomic_long_inc(&se_cmd->se_dev->num_cmds); | 140 | atomic_long_inc(&se_cmd->se_dev->num_cmds); |
135 | 141 | ||
@@ -140,7 +146,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) | |||
140 | atomic_long_add(se_cmd->data_length, | 146 | atomic_long_add(se_cmd->data_length, |
141 | &se_cmd->se_dev->read_bytes); | 147 | &se_cmd->se_dev->read_bytes); |
142 | 148 | ||
143 | return 0; | 149 | return ret; |
144 | } | 150 | } |
145 | EXPORT_SYMBOL(transport_lookup_cmd_lun); | 151 | EXPORT_SYMBOL(transport_lookup_cmd_lun); |
146 | 152 | ||
@@ -427,8 +433,6 @@ void core_disable_device_list_for_node( | |||
427 | 433 | ||
428 | hlist_del_rcu(&orig->link); | 434 | hlist_del_rcu(&orig->link); |
429 | clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags); | 435 | clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags); |
430 | rcu_assign_pointer(orig->se_lun, NULL); | ||
431 | rcu_assign_pointer(orig->se_lun_acl, NULL); | ||
432 | orig->lun_flags = 0; | 436 | orig->lun_flags = 0; |
433 | orig->creation_time = 0; | 437 | orig->creation_time = 0; |
434 | orig->attach_count--; | 438 | orig->attach_count--; |
@@ -439,6 +443,9 @@ void core_disable_device_list_for_node( | |||
439 | kref_put(&orig->pr_kref, target_pr_kref_release); | 443 | kref_put(&orig->pr_kref, target_pr_kref_release); |
440 | wait_for_completion(&orig->pr_comp); | 444 | wait_for_completion(&orig->pr_comp); |
441 | 445 | ||
446 | rcu_assign_pointer(orig->se_lun, NULL); | ||
447 | rcu_assign_pointer(orig->se_lun_acl, NULL); | ||
448 | |||
442 | kfree_rcu(orig, rcu_head); | 449 | kfree_rcu(orig, rcu_head); |
443 | 450 | ||
444 | core_scsi3_free_pr_reg_from_nacl(dev, nacl); | 451 | core_scsi3_free_pr_reg_from_nacl(dev, nacl); |
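[Note on the target_core_device.c hunks above: transport_lookup_cmd_lun() used to return TCM_WRITE_PROTECTED before taking the percpu lun_ref, while the completion path drops that reference whenever lun_ref_active is set, so the get/put could fall out of balance; the rework takes the reference first and routes the write-protected cases through the common ref_dev tail. The disable path also defers clearing se_lun/se_lun_acl until after the PR kref drain. A tiny sketch of the "take the reference before any early error return the caller will unwind" shape; percpu_ref usage matches the diff, everything else is illustrative.]

    #include <linux/errno.h>
    #include <linux/percpu-refcount.h>
    #include <linux/types.h>

    struct example_cmd {
            bool lun_ref_active;
    };

    static int example_lookup(struct example_cmd *cmd, struct percpu_ref *lun_ref,
                              bool write_protected)
    {
            /* grab the reference first: the completion path always drops it
             * once lun_ref_active is set, even when an error is returned */
            percpu_ref_get(lun_ref);
            cmd->lun_ref_active = true;

            if (write_protected)
                    return -EACCES;   /* stands in for TCM_WRITE_PROTECTED */
            return 0;
    }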
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index 9522960c7fdd..22390e0e046c 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c | |||
@@ -187,5 +187,5 @@ core_delete_hba(struct se_hba *hba) | |||
187 | 187 | ||
188 | bool target_sense_desc_format(struct se_device *dev) | 188 | bool target_sense_desc_format(struct se_device *dev) |
189 | { | 189 | { |
190 | return dev->transport->get_blocks(dev) > U32_MAX; | 190 | return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false; |
191 | } | 191 | } |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 5a9982f5d5d6..0f19e11acac2 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -105,6 +105,8 @@ static int iblock_configure_device(struct se_device *dev) | |||
105 | mode = FMODE_READ|FMODE_EXCL; | 105 | mode = FMODE_READ|FMODE_EXCL; |
106 | if (!ib_dev->ibd_readonly) | 106 | if (!ib_dev->ibd_readonly) |
107 | mode |= FMODE_WRITE; | 107 | mode |= FMODE_WRITE; |
108 | else | ||
109 | dev->dev_flags |= DF_READ_ONLY; | ||
108 | 110 | ||
109 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); | 111 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); |
110 | if (IS_ERR(bd)) { | 112 | if (IS_ERR(bd)) { |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 5ab7100de17e..e7933115087a 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -618,7 +618,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( | |||
618 | struct se_device *dev, | 618 | struct se_device *dev, |
619 | struct se_node_acl *nacl, | 619 | struct se_node_acl *nacl, |
620 | struct se_lun *lun, | 620 | struct se_lun *lun, |
621 | struct se_dev_entry *deve, | 621 | struct se_dev_entry *dest_deve, |
622 | u64 mapped_lun, | 622 | u64 mapped_lun, |
623 | unsigned char *isid, | 623 | unsigned char *isid, |
624 | u64 sa_res_key, | 624 | u64 sa_res_key, |
@@ -640,7 +640,29 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( | |||
640 | INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); | 640 | INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); |
641 | atomic_set(&pr_reg->pr_res_holders, 0); | 641 | atomic_set(&pr_reg->pr_res_holders, 0); |
642 | pr_reg->pr_reg_nacl = nacl; | 642 | pr_reg->pr_reg_nacl = nacl; |
643 | pr_reg->pr_reg_deve = deve; | 643 | /* |
644 | * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1, | ||
645 | * the se_dev_entry->pr_ref will have been already obtained by | ||
646 | * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration(). | ||
647 | * | ||
648 | * Otherwise, locate se_dev_entry now and obtain a reference until | ||
649 | * registration completes in __core_scsi3_add_registration(). | ||
650 | */ | ||
651 | if (dest_deve) { | ||
652 | pr_reg->pr_reg_deve = dest_deve; | ||
653 | } else { | ||
654 | rcu_read_lock(); | ||
655 | pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun); | ||
656 | if (!pr_reg->pr_reg_deve) { | ||
657 | rcu_read_unlock(); | ||
658 | pr_err("Unable to locate PR deve %s mapped_lun: %llu\n", | ||
659 | nacl->initiatorname, mapped_lun); | ||
660 | kmem_cache_free(t10_pr_reg_cache, pr_reg); | ||
661 | return NULL; | ||
662 | } | ||
663 | kref_get(&pr_reg->pr_reg_deve->pr_kref); | ||
664 | rcu_read_unlock(); | ||
665 | } | ||
644 | pr_reg->pr_res_mapped_lun = mapped_lun; | 666 | pr_reg->pr_res_mapped_lun = mapped_lun; |
645 | pr_reg->pr_aptpl_target_lun = lun->unpacked_lun; | 667 | pr_reg->pr_aptpl_target_lun = lun->unpacked_lun; |
646 | pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; | 668 | pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; |
@@ -936,17 +958,29 @@ static int __core_scsi3_check_aptpl_registration( | |||
936 | !(strcmp(pr_reg->pr_tport, t_port)) && | 958 | !(strcmp(pr_reg->pr_tport, t_port)) && |
937 | (pr_reg->pr_reg_tpgt == tpgt) && | 959 | (pr_reg->pr_reg_tpgt == tpgt) && |
938 | (pr_reg->pr_aptpl_target_lun == target_lun)) { | 960 | (pr_reg->pr_aptpl_target_lun == target_lun)) { |
961 | /* | ||
962 | * Obtain the ->pr_reg_deve pointer + reference, that | ||
963 | * is released by __core_scsi3_add_registration() below. | ||
964 | */ | ||
965 | rcu_read_lock(); | ||
966 | pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun); | ||
967 | if (!pr_reg->pr_reg_deve) { | ||
968 | pr_err("Unable to locate PR APTPL %s mapped_lun:" | ||
969 | " %llu\n", nacl->initiatorname, mapped_lun); | ||
970 | rcu_read_unlock(); | ||
971 | continue; | ||
972 | } | ||
973 | kref_get(&pr_reg->pr_reg_deve->pr_kref); | ||
974 | rcu_read_unlock(); | ||
939 | 975 | ||
940 | pr_reg->pr_reg_nacl = nacl; | 976 | pr_reg->pr_reg_nacl = nacl; |
941 | pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; | 977 | pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; |
942 | |||
943 | list_del(&pr_reg->pr_reg_aptpl_list); | 978 | list_del(&pr_reg->pr_reg_aptpl_list); |
944 | spin_unlock(&pr_tmpl->aptpl_reg_lock); | 979 | spin_unlock(&pr_tmpl->aptpl_reg_lock); |
945 | /* | 980 | /* |
946 | * At this point all of the pointers in *pr_reg will | 981 | * At this point all of the pointers in *pr_reg will |
947 | * be setup, so go ahead and add the registration. | 982 | * be setup, so go ahead and add the registration. |
948 | */ | 983 | */ |
949 | |||
950 | __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0); | 984 | __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0); |
951 | /* | 985 | /* |
952 | * If this registration is the reservation holder, | 986 | * If this registration is the reservation holder, |
@@ -1044,18 +1078,11 @@ static void __core_scsi3_add_registration( | |||
1044 | 1078 | ||
1045 | __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); | 1079 | __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); |
1046 | spin_unlock(&pr_tmpl->registration_lock); | 1080 | spin_unlock(&pr_tmpl->registration_lock); |
1047 | |||
1048 | rcu_read_lock(); | ||
1049 | deve = pr_reg->pr_reg_deve; | ||
1050 | if (deve) | ||
1051 | set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); | ||
1052 | rcu_read_unlock(); | ||
1053 | |||
1054 | /* | 1081 | /* |
1055 | * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. | 1082 | * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. |
1056 | */ | 1083 | */ |
1057 | if (!pr_reg->pr_reg_all_tg_pt || register_move) | 1084 | if (!pr_reg->pr_reg_all_tg_pt || register_move) |
1058 | return; | 1085 | goto out; |
1059 | /* | 1086 | /* |
1060 | * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 | 1087 | * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 |
1061 | * allocated in __core_scsi3_alloc_registration() | 1088 | * allocated in __core_scsi3_alloc_registration() |
@@ -1075,19 +1102,31 @@ static void __core_scsi3_add_registration( | |||
1075 | __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp, | 1102 | __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp, |
1076 | register_type); | 1103 | register_type); |
1077 | spin_unlock(&pr_tmpl->registration_lock); | 1104 | spin_unlock(&pr_tmpl->registration_lock); |
1078 | 1105 | /* | |
1106 | * Drop configfs group dependency reference and deve->pr_kref | ||
1107 | * obtained from __core_scsi3_alloc_registration() code. | ||
1108 | */ | ||
1079 | rcu_read_lock(); | 1109 | rcu_read_lock(); |
1080 | deve = pr_reg_tmp->pr_reg_deve; | 1110 | deve = pr_reg_tmp->pr_reg_deve; |
1081 | if (deve) | 1111 | if (deve) { |
1082 | set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); | 1112 | set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); |
1113 | core_scsi3_lunacl_undepend_item(deve); | ||
1114 | pr_reg_tmp->pr_reg_deve = NULL; | ||
1115 | } | ||
1083 | rcu_read_unlock(); | 1116 | rcu_read_unlock(); |
1084 | |||
1085 | /* | ||
1086 | * Drop configfs group dependency reference from | ||
1087 | * __core_scsi3_alloc_registration() | ||
1088 | */ | ||
1089 | core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); | ||
1090 | } | 1117 | } |
1118 | out: | ||
1119 | /* | ||
1120 | * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration() | ||
1121 | */ | ||
1122 | rcu_read_lock(); | ||
1123 | deve = pr_reg->pr_reg_deve; | ||
1124 | if (deve) { | ||
1125 | set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); | ||
1126 | kref_put(&deve->pr_kref, target_pr_kref_release); | ||
1127 | pr_reg->pr_reg_deve = NULL; | ||
1128 | } | ||
1129 | rcu_read_unlock(); | ||
1091 | } | 1130 | } |
1092 | 1131 | ||
1093 | static int core_scsi3_alloc_registration( | 1132 | static int core_scsi3_alloc_registration( |
@@ -1785,9 +1824,11 @@ core_scsi3_decode_spec_i_port( | |||
1785 | dest_node_acl->initiatorname, i_buf, (dest_se_deve) ? | 1824 | dest_node_acl->initiatorname, i_buf, (dest_se_deve) ? |
1786 | dest_se_deve->mapped_lun : 0); | 1825 | dest_se_deve->mapped_lun : 0); |
1787 | 1826 | ||
1788 | if (!dest_se_deve) | 1827 | if (!dest_se_deve) { |
1828 | kref_put(&local_pr_reg->pr_reg_deve->pr_kref, | ||
1829 | target_pr_kref_release); | ||
1789 | continue; | 1830 | continue; |
1790 | 1831 | } | |
1791 | core_scsi3_lunacl_undepend_item(dest_se_deve); | 1832 | core_scsi3_lunacl_undepend_item(dest_se_deve); |
1792 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | 1833 | core_scsi3_nodeacl_undepend_item(dest_node_acl); |
1793 | core_scsi3_tpg_undepend_item(dest_tpg); | 1834 | core_scsi3_tpg_undepend_item(dest_tpg); |
@@ -1823,9 +1864,11 @@ out: | |||
1823 | 1864 | ||
1824 | kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); | 1865 | kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); |
1825 | 1866 | ||
1826 | if (!dest_se_deve) | 1867 | if (!dest_se_deve) { |
1868 | kref_put(&local_pr_reg->pr_reg_deve->pr_kref, | ||
1869 | target_pr_kref_release); | ||
1827 | continue; | 1870 | continue; |
1828 | 1871 | } | |
1829 | core_scsi3_lunacl_undepend_item(dest_se_deve); | 1872 | core_scsi3_lunacl_undepend_item(dest_se_deve); |
1830 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | 1873 | core_scsi3_nodeacl_undepend_item(dest_node_acl); |
1831 | core_scsi3_tpg_undepend_item(dest_tpg); | 1874 | core_scsi3_tpg_undepend_item(dest_tpg); |
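[Note on the target_core_pr.c hunks above: the registration path now pins the se_dev_entry with kref_get(&deve->pr_kref) when the registration is allocated (or inherits the reference already taken on the SPEC_I_PT/ALL_TG_PT paths) and drops it with kref_put(..., target_pr_kref_release) once the registration is published, so the entry cannot disappear while a registration still points at it. A generic sketch of that pin-while-looking-up / release-when-done kref pattern; struct example_entry is hypothetical.]

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct example_entry {
            struct kref ref;
    };

    static void example_release(struct kref *ref)
    {
            kfree(container_of(ref, struct example_entry, ref));
    }

    static struct example_entry *example_pin(struct example_entry *e)
    {
            kref_get(&e->ref);        /* pin across the registration window */
            return e;
    }

    static void example_unpin(struct example_entry *e)
    {
            kref_put(&e->ref, example_release);
    }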
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 2d0381dd105c..5fb9dd7f08bb 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -668,7 +668,10 @@ int core_tpg_add_lun( | |||
668 | list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list); | 668 | list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list); |
669 | spin_unlock(&dev->se_port_lock); | 669 | spin_unlock(&dev->se_port_lock); |
670 | 670 | ||
671 | lun->lun_access = lun_access; | 671 | if (dev->dev_flags & DF_READ_ONLY) |
672 | lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | ||
673 | else | ||
674 | lun->lun_access = lun_access; | ||
672 | if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) | 675 | if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) |
673 | hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist); | 676 | hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist); |
674 | mutex_unlock(&tpg->tpg_lun_mutex); | 677 | mutex_unlock(&tpg->tpg_lun_mutex); |
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 039004400987..5aabc4bc0d75 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
@@ -163,7 +163,7 @@ config THERMAL_EMULATION | |||
163 | 163 | ||
164 | config HISI_THERMAL | 164 | config HISI_THERMAL |
165 | tristate "Hisilicon thermal driver" | 165 | tristate "Hisilicon thermal driver" |
166 | depends on ARCH_HISI && CPU_THERMAL && OF | 166 | depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST |
167 | help | 167 | help |
168 | Enable this to plug hisilicon's thermal sensor driver into the Linux | 168 | Enable this to plug hisilicon's thermal sensor driver into the Linux |
169 | thermal framework. cpufreq is used as the cooling device to throttle | 169 | thermal framework. cpufreq is used as the cooling device to throttle |
@@ -182,7 +182,7 @@ config IMX_THERMAL | |||
182 | 182 | ||
183 | config SPEAR_THERMAL | 183 | config SPEAR_THERMAL |
184 | bool "SPEAr thermal sensor driver" | 184 | bool "SPEAr thermal sensor driver" |
185 | depends on PLAT_SPEAR | 185 | depends on PLAT_SPEAR || COMPILE_TEST |
186 | depends on OF | 186 | depends on OF |
187 | help | 187 | help |
188 | Enable this to plug the SPEAr thermal sensor driver into the Linux | 188 | Enable this to plug the SPEAr thermal sensor driver into the Linux |
@@ -190,7 +190,7 @@ config SPEAR_THERMAL | |||
190 | 190 | ||
191 | config ROCKCHIP_THERMAL | 191 | config ROCKCHIP_THERMAL |
192 | tristate "Rockchip thermal driver" | 192 | tristate "Rockchip thermal driver" |
193 | depends on ARCH_ROCKCHIP | 193 | depends on ARCH_ROCKCHIP || COMPILE_TEST |
194 | depends on RESET_CONTROLLER | 194 | depends on RESET_CONTROLLER |
195 | help | 195 | help |
196 | Rockchip thermal driver provides support for Temperature sensor | 196 | Rockchip thermal driver provides support for Temperature sensor |
@@ -208,7 +208,7 @@ config RCAR_THERMAL | |||
208 | 208 | ||
209 | config KIRKWOOD_THERMAL | 209 | config KIRKWOOD_THERMAL |
210 | tristate "Temperature sensor on Marvell Kirkwood SoCs" | 210 | tristate "Temperature sensor on Marvell Kirkwood SoCs" |
211 | depends on MACH_KIRKWOOD | 211 | depends on MACH_KIRKWOOD || COMPILE_TEST |
212 | depends on OF | 212 | depends on OF |
213 | help | 213 | help |
214 | Support for the Kirkwood thermal sensor driver into the Linux thermal | 214 | Support for the Kirkwood thermal sensor driver into the Linux thermal |
@@ -216,7 +216,7 @@ config KIRKWOOD_THERMAL | |||
216 | 216 | ||
217 | config DOVE_THERMAL | 217 | config DOVE_THERMAL |
218 | tristate "Temperature sensor on Marvell Dove SoCs" | 218 | tristate "Temperature sensor on Marvell Dove SoCs" |
219 | depends on ARCH_DOVE || MACH_DOVE | 219 | depends on ARCH_DOVE || MACH_DOVE || COMPILE_TEST |
220 | depends on OF | 220 | depends on OF |
221 | help | 221 | help |
222 | Support for the Dove thermal sensor driver in the Linux thermal | 222 | Support for the Dove thermal sensor driver in the Linux thermal |
@@ -234,7 +234,7 @@ config DB8500_THERMAL | |||
234 | 234 | ||
235 | config ARMADA_THERMAL | 235 | config ARMADA_THERMAL |
236 | tristate "Armada 370/XP thermal management" | 236 | tristate "Armada 370/XP thermal management" |
237 | depends on ARCH_MVEBU | 237 | depends on ARCH_MVEBU || COMPILE_TEST |
238 | depends on OF | 238 | depends on OF |
239 | help | 239 | help |
240 | Enable this option if you want to have support for thermal management | 240 | Enable this option if you want to have support for thermal management |
@@ -349,11 +349,12 @@ config INTEL_PCH_THERMAL | |||
349 | programmable trip points and other information. | 349 | programmable trip points and other information. |
350 | 350 | ||
351 | menu "Texas Instruments thermal drivers" | 351 | menu "Texas Instruments thermal drivers" |
352 | depends on ARCH_HAS_BANDGAP || COMPILE_TEST | ||
352 | source "drivers/thermal/ti-soc-thermal/Kconfig" | 353 | source "drivers/thermal/ti-soc-thermal/Kconfig" |
353 | endmenu | 354 | endmenu |
354 | 355 | ||
355 | menu "Samsung thermal drivers" | 356 | menu "Samsung thermal drivers" |
356 | depends on ARCH_EXYNOS | 357 | depends on ARCH_EXYNOS || COMPILE_TEST |
357 | source "drivers/thermal/samsung/Kconfig" | 358 | source "drivers/thermal/samsung/Kconfig" |
358 | endmenu | 359 | endmenu |
359 | 360 | ||
@@ -364,7 +365,7 @@ endmenu | |||
364 | 365 | ||
365 | config QCOM_SPMI_TEMP_ALARM | 366 | config QCOM_SPMI_TEMP_ALARM |
366 | tristate "Qualcomm SPMI PMIC Temperature Alarm" | 367 | tristate "Qualcomm SPMI PMIC Temperature Alarm" |
367 | depends on OF && SPMI && IIO | 368 | depends on OF && (SPMI || COMPILE_TEST) && IIO |
368 | select REGMAP_SPMI | 369 | select REGMAP_SPMI |
369 | help | 370 | help |
370 | This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) | 371 | This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) |
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 620dcd405ff6..42c6f71bdcc1 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
@@ -262,7 +262,9 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, | |||
262 | * efficiently. Power is stored in mW, frequency in KHz. The | 262 | * efficiently. Power is stored in mW, frequency in KHz. The |
263 | * resulting table is in ascending order. | 263 | * resulting table is in ascending order. |
264 | * | 264 | * |
265 | * Return: 0 on success, -E* on error. | 265 | * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs, |
266 | * -ENOMEM if we run out of memory or -EAGAIN if an OPP was | ||
267 | * added/enabled while the function was executing. | ||
266 | */ | 268 | */ |
267 | static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, | 269 | static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, |
268 | u32 capacitance) | 270 | u32 capacitance) |
@@ -273,8 +275,6 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, | |||
273 | int num_opps = 0, cpu, i, ret = 0; | 275 | int num_opps = 0, cpu, i, ret = 0; |
274 | unsigned long freq; | 276 | unsigned long freq; |
275 | 277 | ||
276 | rcu_read_lock(); | ||
277 | |||
278 | for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { | 278 | for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { |
279 | dev = get_cpu_device(cpu); | 279 | dev = get_cpu_device(cpu); |
280 | if (!dev) { | 280 | if (!dev) { |
@@ -284,24 +284,20 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, | |||
284 | } | 284 | } |
285 | 285 | ||
286 | num_opps = dev_pm_opp_get_opp_count(dev); | 286 | num_opps = dev_pm_opp_get_opp_count(dev); |
287 | if (num_opps > 0) { | 287 | if (num_opps > 0) |
288 | break; | 288 | break; |
289 | } else if (num_opps < 0) { | 289 | else if (num_opps < 0) |
290 | ret = num_opps; | 290 | return num_opps; |
291 | goto unlock; | ||
292 | } | ||
293 | } | 291 | } |
294 | 292 | ||
295 | if (num_opps == 0) { | 293 | if (num_opps == 0) |
296 | ret = -EINVAL; | 294 | return -EINVAL; |
297 | goto unlock; | ||
298 | } | ||
299 | 295 | ||
300 | power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL); | 296 | power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL); |
301 | if (!power_table) { | 297 | if (!power_table) |
302 | ret = -ENOMEM; | 298 | return -ENOMEM; |
303 | goto unlock; | 299 | |
304 | } | 300 | rcu_read_lock(); |
305 | 301 | ||
306 | for (freq = 0, i = 0; | 302 | for (freq = 0, i = 0; |
307 | opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp); | 303 | opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp); |
@@ -309,6 +305,12 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, | |||
309 | u32 freq_mhz, voltage_mv; | 305 | u32 freq_mhz, voltage_mv; |
310 | u64 power; | 306 | u64 power; |
311 | 307 | ||
308 | if (i >= num_opps) { | ||
309 | rcu_read_unlock(); | ||
310 | ret = -EAGAIN; | ||
311 | goto free_power_table; | ||
312 | } | ||
313 | |||
312 | freq_mhz = freq / 1000000; | 314 | freq_mhz = freq / 1000000; |
313 | voltage_mv = dev_pm_opp_get_voltage(opp) / 1000; | 315 | voltage_mv = dev_pm_opp_get_voltage(opp) / 1000; |
314 | 316 | ||
@@ -326,17 +328,22 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, | |||
326 | power_table[i].power = power; | 328 | power_table[i].power = power; |
327 | } | 329 | } |
328 | 330 | ||
329 | if (i == 0) { | 331 | rcu_read_unlock(); |
332 | |||
333 | if (i != num_opps) { | ||
330 | ret = PTR_ERR(opp); | 334 | ret = PTR_ERR(opp); |
331 | goto unlock; | 335 | goto free_power_table; |
332 | } | 336 | } |
333 | 337 | ||
334 | cpufreq_device->cpu_dev = dev; | 338 | cpufreq_device->cpu_dev = dev; |
335 | cpufreq_device->dyn_power_table = power_table; | 339 | cpufreq_device->dyn_power_table = power_table; |
336 | cpufreq_device->dyn_power_table_entries = i; | 340 | cpufreq_device->dyn_power_table_entries = i; |
337 | 341 | ||
338 | unlock: | 342 | return 0; |
339 | rcu_read_unlock(); | 343 | |
344 | free_power_table: | ||
345 | kfree(power_table); | ||
346 | |||
340 | return ret; | 347 | return ret; |
341 | } | 348 | } |
342 | 349 | ||
@@ -847,7 +854,7 @@ __cpufreq_cooling_register(struct device_node *np, | |||
847 | ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); | 854 | ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); |
848 | if (ret) { | 855 | if (ret) { |
849 | cool_dev = ERR_PTR(ret); | 856 | cool_dev = ERR_PTR(ret); |
850 | goto free_table; | 857 | goto free_power_table; |
851 | } | 858 | } |
852 | 859 | ||
853 | snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", | 860 | snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", |
@@ -889,6 +896,8 @@ __cpufreq_cooling_register(struct device_node *np, | |||
889 | 896 | ||
890 | remove_idr: | 897 | remove_idr: |
891 | release_idr(&cpufreq_idr, cpufreq_dev->id); | 898 | release_idr(&cpufreq_idr, cpufreq_dev->id); |
899 | free_power_table: | ||
900 | kfree(cpufreq_dev->dyn_power_table); | ||
892 | free_table: | 901 | free_table: |
893 | kfree(cpufreq_dev->freq_table); | 902 | kfree(cpufreq_dev->freq_table); |
894 | free_time_in_idle_timestamp: | 903 | free_time_in_idle_timestamp: |
@@ -1039,6 +1048,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) | |||
1039 | 1048 | ||
1040 | thermal_cooling_device_unregister(cpufreq_dev->cool_dev); | 1049 | thermal_cooling_device_unregister(cpufreq_dev->cool_dev); |
1041 | release_idr(&cpufreq_idr, cpufreq_dev->id); | 1050 | release_idr(&cpufreq_idr, cpufreq_dev->id); |
1051 | kfree(cpufreq_dev->dyn_power_table); | ||
1042 | kfree(cpufreq_dev->time_in_idle_timestamp); | 1052 | kfree(cpufreq_dev->time_in_idle_timestamp); |
1043 | kfree(cpufreq_dev->time_in_idle); | 1053 | kfree(cpufreq_dev->time_in_idle); |
1044 | kfree(cpufreq_dev->freq_table); | 1054 | kfree(cpufreq_dev->freq_table); |
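[Note on the cpu_cooling.c hunks above: build_dyn_power_table() used to hold rcu_read_lock() across the whole function, including a kcalloc(GFP_KERNEL), which may sleep and is not allowed inside an RCU read-side critical section. The rework allocates first, holds the RCU lock only around the dev_pm_opp_* walk that needs it, frees the table on the new -EAGAIN/error paths, and frees dyn_power_table in unregister and in the probe error path that previously leaked it. A schematic of the "allocate outside, iterate under RCU" shape; the OPP walk is elided and example_build_table is a placeholder.]

    #include <linux/errno.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    static int example_build_table(int num_entries)
    {
            u32 *table;
            int i;

            /* GFP_KERNEL may sleep: allocate before rcu_read_lock() */
            table = kcalloc(num_entries, sizeof(*table), GFP_KERNEL);
            if (!table)
                    return -ENOMEM;

            rcu_read_lock();
            for (i = 0; i < num_entries; i++)
                    table[i] = i;     /* stands in for the RCU-protected OPP walk */
            rcu_read_unlock();

            /* a real driver would publish the table; the sketch just frees it */
            kfree(table);
            return 0;
    }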
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c index 607b62c7e611..e58bd0b658b5 100644 --- a/drivers/thermal/db8500_cpufreq_cooling.c +++ b/drivers/thermal/db8500_cpufreq_cooling.c | |||
@@ -72,6 +72,7 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = { | |||
72 | { .compatible = "stericsson,db8500-cpufreq-cooling" }, | 72 | { .compatible = "stericsson,db8500-cpufreq-cooling" }, |
73 | {}, | 73 | {}, |
74 | }; | 74 | }; |
75 | MODULE_DEVICE_TABLE(of, db8500_cpufreq_cooling_match); | ||
75 | #endif | 76 | #endif |
76 | 77 | ||
77 | static struct platform_driver db8500_cpufreq_cooling_driver = { | 78 | static struct platform_driver db8500_cpufreq_cooling_driver = { |
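[Note on the db8500 hunk above: MODULE_DEVICE_TABLE(of, ...) exports the OF match table into the module alias list, so udev/modprobe can autoload the driver when a matching compatible node such as "stericsson,db8500-cpufreq-cooling" is present; without it the module only binds if loaded by hand. A minimal sketch of the pairing; the example_* names and the "vendor,example-device" compatible are placeholders.]

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example-device" },
            { /* sentinel */ },
    };
    MODULE_DEVICE_TABLE(of, example_of_match);   /* emits modalias entries */

    static struct platform_driver example_driver = {
            .probe = example_probe,
            .driver = {
                    .name = "example",
                    .of_match_table = example_of_match,
            },
    };
    module_platform_driver(example_driver);
    MODULE_LICENSE("GPL");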
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 9c8a7aad0252..e570ff084add 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c | |||
@@ -24,6 +24,8 @@ | |||
24 | 24 | ||
25 | #include "thermal_core.h" | 25 | #include "thermal_core.h" |
26 | 26 | ||
27 | #define INVALID_TRIP -1 | ||
28 | |||
27 | #define FRAC_BITS 10 | 29 | #define FRAC_BITS 10 |
28 | #define int_to_frac(x) ((x) << FRAC_BITS) | 30 | #define int_to_frac(x) ((x) << FRAC_BITS) |
29 | #define frac_to_int(x) ((x) >> FRAC_BITS) | 31 | #define frac_to_int(x) ((x) >> FRAC_BITS) |
@@ -56,16 +58,21 @@ static inline s64 div_frac(s64 x, s64 y) | |||
56 | 58 | ||
57 | /** | 59 | /** |
58 | * struct power_allocator_params - parameters for the power allocator governor | 60 | * struct power_allocator_params - parameters for the power allocator governor |
61 | * @allocated_tzp: whether we have allocated tzp for this thermal zone and | ||
62 | * it needs to be freed on unbind | ||
59 | * @err_integral: accumulated error in the PID controller. | 63 | * @err_integral: accumulated error in the PID controller. |
60 | * @prev_err: error in the previous iteration of the PID controller. | 64 | * @prev_err: error in the previous iteration of the PID controller. |
61 | * Used to calculate the derivative term. | 65 | * Used to calculate the derivative term. |
62 | * @trip_switch_on: first passive trip point of the thermal zone. The | 66 | * @trip_switch_on: first passive trip point of the thermal zone. The |
63 | * governor switches on when this trip point is crossed. | 67 | * governor switches on when this trip point is crossed. |
68 | * If the thermal zone only has one passive trip point, | ||
69 | * @trip_switch_on should be INVALID_TRIP. | ||
64 | * @trip_max_desired_temperature: last passive trip point of the thermal | 70 | * @trip_max_desired_temperature: last passive trip point of the thermal |
65 | * zone. The temperature we are | 71 | * zone. The temperature we are |
66 | * controlling for. | 72 | * controlling for. |
67 | */ | 73 | */ |
68 | struct power_allocator_params { | 74 | struct power_allocator_params { |
75 | bool allocated_tzp; | ||
69 | s64 err_integral; | 76 | s64 err_integral; |
70 | s32 prev_err; | 77 | s32 prev_err; |
71 | int trip_switch_on; | 78 | int trip_switch_on; |
@@ -73,6 +80,98 @@ struct power_allocator_params { | |||
73 | }; | 80 | }; |
74 | 81 | ||
75 | /** | 82 | /** |
83 | * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone | ||
84 | * @tz: thermal zone we are operating in | ||
85 | * | ||
86 | * For thermal zones that don't provide a sustainable_power in their | ||
87 | * thermal_zone_params, estimate one. Calculate it using the minimum | ||
88 | * power of all the cooling devices as that gives a valid value that | ||
89 | * can give some degree of functionality. For optimal performance of | ||
90 | * this governor, provide a sustainable_power in the thermal zone's | ||
91 | * thermal_zone_params. | ||
92 | */ | ||
93 | static u32 estimate_sustainable_power(struct thermal_zone_device *tz) | ||
94 | { | ||
95 | u32 sustainable_power = 0; | ||
96 | struct thermal_instance *instance; | ||
97 | struct power_allocator_params *params = tz->governor_data; | ||
98 | |||
99 | list_for_each_entry(instance, &tz->thermal_instances, tz_node) { | ||
100 | struct thermal_cooling_device *cdev = instance->cdev; | ||
101 | u32 min_power; | ||
102 | |||
103 | if (instance->trip != params->trip_max_desired_temperature) | ||
104 | continue; | ||
105 | |||
106 | if (power_actor_get_min_power(cdev, tz, &min_power)) | ||
107 | continue; | ||
108 | |||
109 | sustainable_power += min_power; | ||
110 | } | ||
111 | |||
112 | return sustainable_power; | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * estimate_pid_constants() - Estimate the constants for the PID controller | ||
117 | * @tz: thermal zone for which to estimate the constants | ||
118 | * @sustainable_power: sustainable power for the thermal zone | ||
119 | * @trip_switch_on: trip point number for the switch on temperature | ||
120 | * @control_temp: target temperature for the power allocator governor | ||
121 | * @force: whether to force the update of the constants | ||
122 | * | ||
123 | * This function is used to update the estimation of the PID | ||
124 | * controller constants in struct thermal_zone_parameters. | ||
125 | * Sustainable power is provided in case it was estimated. The | ||
126 | * estimated sustainable_power should not be stored in the | ||
127 | * thermal_zone_parameters so it has to be passed explicitly to this | ||
128 | * function. | ||
129 | * | ||
130 | * If @force is not set, the values in the thermal zone's parameters | ||
131 | * are preserved if they are not zero. If @force is set, the values | ||
132 | * in thermal zone's parameters are overwritten. | ||
133 | */ | ||
134 | static void estimate_pid_constants(struct thermal_zone_device *tz, | ||
135 | u32 sustainable_power, int trip_switch_on, | ||
136 | int control_temp, bool force) | ||
137 | { | ||
138 | int ret; | ||
139 | int switch_on_temp; | ||
140 | u32 temperature_threshold; | ||
141 | |||
142 | ret = tz->ops->get_trip_temp(tz, trip_switch_on, &switch_on_temp); | ||
143 | if (ret) | ||
144 | switch_on_temp = 0; | ||
145 | |||
146 | temperature_threshold = control_temp - switch_on_temp; | ||
147 | /* | ||
148 | * estimate_pid_constants() tries to find appropriate default | ||
149 | * values for thermal zones that don't provide them. If a | ||
150 | * system integrator has configured a thermal zone with two | ||
151 | * passive trip points at the same temperature, that person | ||
152 | * hasn't put any effort to set up the thermal zone properly | ||
153 | * so just give up. | ||
154 | */ | ||
155 | if (!temperature_threshold) | ||
156 | return; | ||
157 | |||
158 | if (!tz->tzp->k_po || force) | ||
159 | tz->tzp->k_po = int_to_frac(sustainable_power) / | ||
160 | temperature_threshold; | ||
161 | |||
162 | if (!tz->tzp->k_pu || force) | ||
163 | tz->tzp->k_pu = int_to_frac(2 * sustainable_power) / | ||
164 | temperature_threshold; | ||
165 | |||
166 | if (!tz->tzp->k_i || force) | ||
167 | tz->tzp->k_i = int_to_frac(10) / 1000; | ||
168 | /* | ||
169 | * The default for k_d and integral_cutoff is 0, so we can | ||
170 | * leave them as they are. | ||
171 | */ | ||
172 | } | ||
173 | |||
174 | /** | ||
76 | * pid_controller() - PID controller | 175 | * pid_controller() - PID controller |
77 | * @tz: thermal zone we are operating in | 176 | * @tz: thermal zone we are operating in |
78 | * @current_temp: the current temperature in millicelsius | 177 | * @current_temp: the current temperature in millicelsius |
@@ -98,10 +197,20 @@ static u32 pid_controller(struct thermal_zone_device *tz, | |||
98 | { | 197 | { |
99 | s64 p, i, d, power_range; | 198 | s64 p, i, d, power_range; |
100 | s32 err, max_power_frac; | 199 | s32 err, max_power_frac; |
200 | u32 sustainable_power; | ||
101 | struct power_allocator_params *params = tz->governor_data; | 201 | struct power_allocator_params *params = tz->governor_data; |
102 | 202 | ||
103 | max_power_frac = int_to_frac(max_allocatable_power); | 203 | max_power_frac = int_to_frac(max_allocatable_power); |
104 | 204 | ||
205 | if (tz->tzp->sustainable_power) { | ||
206 | sustainable_power = tz->tzp->sustainable_power; | ||
207 | } else { | ||
208 | sustainable_power = estimate_sustainable_power(tz); | ||
209 | estimate_pid_constants(tz, sustainable_power, | ||
210 | params->trip_switch_on, control_temp, | ||
211 | true); | ||
212 | } | ||
213 | |||
105 | err = control_temp - current_temp; | 214 | err = control_temp - current_temp; |
106 | err = int_to_frac(err); | 215 | err = int_to_frac(err); |
107 | 216 | ||
@@ -139,7 +248,7 @@ static u32 pid_controller(struct thermal_zone_device *tz, | |||
139 | power_range = p + i + d; | 248 | power_range = p + i + d; |
140 | 249 | ||
141 | /* feed-forward the known sustainable dissipatable power */ | 250 | /* feed-forward the known sustainable dissipatable power */ |
142 | power_range = tz->tzp->sustainable_power + frac_to_int(power_range); | 251 | power_range = sustainable_power + frac_to_int(power_range); |
143 | 252 | ||
144 | power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power); | 253 | power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power); |
145 | 254 | ||
@@ -247,6 +356,11 @@ static int allocate_power(struct thermal_zone_device *tz, | |||
247 | } | 356 | } |
248 | } | 357 | } |
249 | 358 | ||
359 | if (!num_actors) { | ||
360 | ret = -ENODEV; | ||
361 | goto unlock; | ||
362 | } | ||
363 | |||
250 | /* | 364 | /* |
251 | * We need to allocate five arrays of the same size: | 365 | * We need to allocate five arrays of the same size: |
252 | * req_power, max_power, granted_power, extra_actor_power and | 366 | * req_power, max_power, granted_power, extra_actor_power and |
@@ -340,43 +454,66 @@ unlock: | |||
340 | return ret; | 454 | return ret; |
341 | } | 455 | } |
342 | 456 | ||
343 | static int get_governor_trips(struct thermal_zone_device *tz, | 457 | /** |
344 | struct power_allocator_params *params) | 458 | * get_governor_trips() - get the number of the two trip points that are key for this governor |
459 | * @tz: thermal zone to operate on | ||
460 | * @params: pointer to private data for this governor | ||
461 | * | ||
462 | * The power allocator governor works optimally with two trips points: | ||
463 | * a "switch on" trip point and a "maximum desired temperature". These | ||
464 | * are defined as the first and last passive trip points. | ||
465 | * | ||
466 | * If there is only one trip point, then that's considered to be the | ||
467 | * "maximum desired temperature" trip point and the governor is always | ||
468 | * on. If there are no passive or active trip points, then the | ||
469 | * governor won't do anything. In fact, its throttle function | ||
470 | * won't be called at all. | ||
471 | */ | ||
472 | static void get_governor_trips(struct thermal_zone_device *tz, | ||
473 | struct power_allocator_params *params) | ||
345 | { | 474 | { |
346 | int i, ret, last_passive; | 475 | int i, last_active, last_passive; |
347 | bool found_first_passive; | 476 | bool found_first_passive; |
348 | 477 | ||
349 | found_first_passive = false; | 478 | found_first_passive = false; |
350 | last_passive = -1; | 479 | last_active = INVALID_TRIP; |
351 | ret = -EINVAL; | 480 | last_passive = INVALID_TRIP; |
352 | 481 | ||
353 | for (i = 0; i < tz->trips; i++) { | 482 | for (i = 0; i < tz->trips; i++) { |
354 | enum thermal_trip_type type; | 483 | enum thermal_trip_type type; |
484 | int ret; | ||
355 | 485 | ||
356 | ret = tz->ops->get_trip_type(tz, i, &type); | 486 | ret = tz->ops->get_trip_type(tz, i, &type); |
357 | if (ret) | 487 | if (ret) { |
358 | return ret; | 488 | dev_warn(&tz->device, |
489 | "Failed to get trip point %d type: %d\n", i, | ||
490 | ret); | ||
491 | continue; | ||
492 | } | ||
359 | 493 | ||
360 | if (!found_first_passive) { | 494 | if (type == THERMAL_TRIP_PASSIVE) { |
361 | if (type == THERMAL_TRIP_PASSIVE) { | 495 | if (!found_first_passive) { |
362 | params->trip_switch_on = i; | 496 | params->trip_switch_on = i; |
363 | found_first_passive = true; | 497 | found_first_passive = true; |
498 | } else { | ||
499 | last_passive = i; | ||
364 | } | 500 | } |
365 | } else if (type == THERMAL_TRIP_PASSIVE) { | 501 | } else if (type == THERMAL_TRIP_ACTIVE) { |
366 | last_passive = i; | 502 | last_active = i; |
367 | } else { | 503 | } else { |
368 | break; | 504 | break; |
369 | } | 505 | } |
370 | } | 506 | } |
371 | 507 | ||
372 | if (last_passive != -1) { | 508 | if (last_passive != INVALID_TRIP) { |
373 | params->trip_max_desired_temperature = last_passive; | 509 | params->trip_max_desired_temperature = last_passive; |
374 | ret = 0; | 510 | } else if (found_first_passive) { |
511 | params->trip_max_desired_temperature = params->trip_switch_on; | ||
512 | params->trip_switch_on = INVALID_TRIP; | ||
375 | } else { | 513 | } else { |
376 | ret = -EINVAL; | 514 | params->trip_switch_on = INVALID_TRIP; |
515 | params->trip_max_desired_temperature = last_active; | ||
377 | } | 516 | } |
378 | |||
379 | return ret; | ||
380 | } | 517 | } |
381 | 518 | ||
382 | static void reset_pid_controller(struct power_allocator_params *params) | 519 | static void reset_pid_controller(struct power_allocator_params *params) |
@@ -405,60 +542,45 @@ static void allow_maximum_power(struct thermal_zone_device *tz) | |||
405 | * power_allocator_bind() - bind the power_allocator governor to a thermal zone | 542 | * power_allocator_bind() - bind the power_allocator governor to a thermal zone |
406 | * @tz: thermal zone to bind it to | 543 | * @tz: thermal zone to bind it to |
407 | * | 544 | * |
408 | * Check that the thermal zone is valid for this governor, that is, it | 545 | * Initialize the PID controller parameters and bind it to the thermal |
409 | * has two thermal trips. If so, initialize the PID controller | 546 | * zone. |
410 | * parameters and bind it to the thermal zone. | ||
411 | * | 547 | * |
412 | * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM | 548 | * Return: 0 on success, or -ENOMEM if we ran out of memory. |
413 | * if we ran out of memory. | ||
414 | */ | 549 | */ |
415 | static int power_allocator_bind(struct thermal_zone_device *tz) | 550 | static int power_allocator_bind(struct thermal_zone_device *tz) |
416 | { | 551 | { |
417 | int ret; | 552 | int ret; |
418 | struct power_allocator_params *params; | 553 | struct power_allocator_params *params; |
419 | int switch_on_temp, control_temp; | 554 | int control_temp; |
420 | u32 temperature_threshold; | ||
421 | |||
422 | if (!tz->tzp || !tz->tzp->sustainable_power) { | ||
423 | dev_err(&tz->device, | ||
424 | "power_allocator: missing sustainable_power\n"); | ||
425 | return -EINVAL; | ||
426 | } | ||
427 | 555 | ||
428 | params = kzalloc(sizeof(*params), GFP_KERNEL); | 556 | params = kzalloc(sizeof(*params), GFP_KERNEL); |
429 | if (!params) | 557 | if (!params) |
430 | return -ENOMEM; | 558 | return -ENOMEM; |
431 | 559 | ||
432 | ret = get_governor_trips(tz, params); | 560 | if (!tz->tzp) { |
433 | if (ret) { | 561 | tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL); |
434 | dev_err(&tz->device, | 562 | if (!tz->tzp) { |
435 | "thermal zone %s has wrong trip setup for power allocator\n", | 563 | ret = -ENOMEM; |
436 | tz->type); | 564 | goto free_params; |
437 | goto free; | 565 | } |
438 | } | ||
439 | 566 | ||
440 | ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, | 567 | params->allocated_tzp = true; |
441 | &switch_on_temp); | 568 | } |
442 | if (ret) | ||
443 | goto free; | ||
444 | 569 | ||
445 | ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature, | 570 | if (!tz->tzp->sustainable_power) |
446 | &control_temp); | 571 | dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n"); |
447 | if (ret) | ||
448 | goto free; | ||
449 | 572 | ||
450 | temperature_threshold = control_temp - switch_on_temp; | 573 | get_governor_trips(tz, params); |
451 | 574 | ||
452 | tz->tzp->k_po = tz->tzp->k_po ?: | 575 | if (tz->trips > 0) { |
453 | int_to_frac(tz->tzp->sustainable_power) / temperature_threshold; | 576 | ret = tz->ops->get_trip_temp(tz, |
454 | tz->tzp->k_pu = tz->tzp->k_pu ?: | 577 | params->trip_max_desired_temperature, |
455 | int_to_frac(2 * tz->tzp->sustainable_power) / | 578 | &control_temp); |
456 | temperature_threshold; | 579 | if (!ret) |
457 | tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000; | 580 | estimate_pid_constants(tz, tz->tzp->sustainable_power, |
458 | /* | 581 | params->trip_switch_on, |
459 | * The default for k_d and integral_cutoff is 0, so we can | 582 | control_temp, false); |
460 | * leave them as they are. | 583 | } |
461 | */ | ||
462 | 584 | ||
463 | reset_pid_controller(params); | 585 | reset_pid_controller(params); |
464 | 586 | ||
@@ -466,14 +588,23 @@ static int power_allocator_bind(struct thermal_zone_device *tz) | |||
466 | 588 | ||
467 | return 0; | 589 | return 0; |
468 | 590 | ||
469 | free: | 591 | free_params: |
470 | kfree(params); | 592 | kfree(params); |
593 | |||
471 | return ret; | 594 | return ret; |
472 | } | 595 | } |
473 | 596 | ||
474 | static void power_allocator_unbind(struct thermal_zone_device *tz) | 597 | static void power_allocator_unbind(struct thermal_zone_device *tz) |
475 | { | 598 | { |
599 | struct power_allocator_params *params = tz->governor_data; | ||
600 | |||
476 | dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); | 601 | dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); |
602 | |||
603 | if (params->allocated_tzp) { | ||
604 | kfree(tz->tzp); | ||
605 | tz->tzp = NULL; | ||
606 | } | ||
607 | |||
477 | kfree(tz->governor_data); | 608 | kfree(tz->governor_data); |
478 | tz->governor_data = NULL; | 609 | tz->governor_data = NULL; |
479 | } | 610 | } |
@@ -499,13 +630,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) | |||
499 | 630 | ||
500 | ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, | 631 | ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, |
501 | &switch_on_temp); | 632 | &switch_on_temp); |
502 | if (ret) { | 633 | if (!ret && (current_temp < switch_on_temp)) { |
503 | dev_warn(&tz->device, | ||
504 | "Failed to get switch on temperature: %d\n", ret); | ||
505 | return ret; | ||
506 | } | ||
507 | |||
508 | if (current_temp < switch_on_temp) { | ||
509 | tz->passive = 0; | 634 | tz->passive = 0; |
510 | reset_pid_controller(params); | 635 | reset_pid_controller(params); |
511 | allow_maximum_power(tz); | 636 | allow_maximum_power(tz); |
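The estimate_pid_constants() change above derives the proportional gains from the sustainable power and the temperature headroom between the switch-on and control trip points, using the driver's 10-bit fixed-point helpers. A minimal standalone sketch of that arithmetic (userspace types; the values are illustrative only, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 10
    #define int_to_frac(x) ((int64_t)(x) << FRAC_BITS)

    int main(void)
    {
    	uint32_t sustainable_power = 3500;	/* mW, illustrative */
    	int control_temp = 75000;		/* millicelsius */
    	int switch_on_temp = 65000;		/* millicelsius */
    	int threshold = control_temp - switch_on_temp;

    	if (!threshold)
    		return 1;	/* both trips at the same temperature: give up, as the driver does */

    	/* gain below the target (k_po) and above the target (k_pu) */
    	int64_t k_po = int_to_frac(sustainable_power) / threshold;
    	int64_t k_pu = int_to_frac(2 * sustainable_power) / threshold;
    	int64_t k_i  = int_to_frac(10) / 1000;	/* small integral gain */

    	printf("k_po=%lld k_pu=%lld k_i=%lld\n",
    	       (long long)k_po, (long long)k_pu, (long long)k_i);
    	return 0;
    }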
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 5e5fc7015c7f..d9e525cc9c1c 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c | |||
@@ -1013,6 +1013,34 @@ int power_actor_get_max_power(struct thermal_cooling_device *cdev, | |||
1013 | } | 1013 | } |
1014 | 1014 | ||
1015 | /** | 1015 | /** |
1016 | * power_actor_get_min_power() - get the minimum power that a cdev can consume | ||
1017 | * @cdev: pointer to &thermal_cooling_device | ||
1018 | * @tz: a valid thermal zone device pointer | ||
1019 | * @min_power: pointer in which to store the minimum power | ||
1020 | * | ||
1021 | * Calculate the minimum power consumption in milliwatts that the | ||
1022 | * cooling device can currently consume and store it in @min_power. | ||
1023 | * | ||
1024 | * Return: 0 on success, -EINVAL if @cdev doesn't support the | ||
1025 | * power_actor API or -E* on other error. | ||
1026 | */ | ||
1027 | int power_actor_get_min_power(struct thermal_cooling_device *cdev, | ||
1028 | struct thermal_zone_device *tz, u32 *min_power) | ||
1029 | { | ||
1030 | unsigned long max_state; | ||
1031 | int ret; | ||
1032 | |||
1033 | if (!cdev_is_power_actor(cdev)) | ||
1034 | return -EINVAL; | ||
1035 | |||
1036 | ret = cdev->ops->get_max_state(cdev, &max_state); | ||
1037 | if (ret) | ||
1038 | return ret; | ||
1039 | |||
1040 | return cdev->ops->state2power(cdev, tz, max_state, min_power); | ||
1041 | } | ||
1042 | |||
1043 | /** | ||
1016 | * power_actor_set_power() - limit the maximum power that a cooling device can consume | 1044 | * power_actor_set_power() - limit the maximum power that a cooling device can consume |
1017 | * @cdev: pointer to &thermal_cooling_device | 1045 | * @cdev: pointer to &thermal_cooling_device |
1018 | * @instance: thermal instance to update | 1046 | * @instance: thermal instance to update |
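power_actor_get_min_power() simply asks the cooling device for its power draw at the deepest cooling state, so a cooling device only needs a state2power() callback for the governor to estimate sustainable power. A hypothetical table-based implementation, sketched under the assumption that the device keeps precomputed per-state power figures (the names below are illustrative, not part of the patch):

    /* Hypothetical cooling device private data with a per-state power table. */
    struct foo_cooling {
    	u32 *power_mw;			/* power at each cooling state */
    	unsigned long max_state;
    };

    static int foo_state2power(struct thermal_cooling_device *cdev,
    			   struct thermal_zone_device *tz,
    			   unsigned long state, u32 *power)
    {
    	struct foo_cooling *foo = cdev->devdata;

    	if (state > foo->max_state)
    		return -EINVAL;

    	/* state 0 = no throttling (maximum power), deepest state = minimum */
    	*power = foo->power_mw[state];
    	return 0;
    }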
diff --git a/drivers/thermal/ti-soc-thermal/Kconfig b/drivers/thermal/ti-soc-thermal/Kconfig index bd4c7beba679..cb6686ff09ae 100644 --- a/drivers/thermal/ti-soc-thermal/Kconfig +++ b/drivers/thermal/ti-soc-thermal/Kconfig | |||
@@ -1,7 +1,5 @@ | |||
1 | config TI_SOC_THERMAL | 1 | config TI_SOC_THERMAL |
2 | tristate "Texas Instruments SoCs temperature sensor driver" | 2 | tristate "Texas Instruments SoCs temperature sensor driver" |
3 | depends on THERMAL | ||
4 | depends on ARCH_HAS_BANDGAP | ||
5 | help | 3 | help |
6 | If you say yes here you get support for the Texas Instruments | 4 | If you say yes here you get support for the Texas Instruments |
7 | OMAP4460+ on die bandgap temperature sensor support. The register | 5 | OMAP4460+ on die bandgap temperature sensor support. The register |
@@ -24,7 +22,7 @@ config TI_THERMAL | |||
24 | config OMAP4_THERMAL | 22 | config OMAP4_THERMAL |
25 | bool "Texas Instruments OMAP4 thermal support" | 23 | bool "Texas Instruments OMAP4 thermal support" |
26 | depends on TI_SOC_THERMAL | 24 | depends on TI_SOC_THERMAL |
27 | depends on ARCH_OMAP4 | 25 | depends on ARCH_OMAP4 || COMPILE_TEST |
28 | help | 26 | help |
29 | If you say yes here you get thermal support for the Texas Instruments | 27 | If you say yes here you get thermal support for the Texas Instruments |
30 | OMAP4 SoC family. The current chip supported are: | 28 | OMAP4 SoC family. The current chip supported are: |
@@ -38,7 +36,7 @@ config OMAP4_THERMAL | |||
38 | config OMAP5_THERMAL | 36 | config OMAP5_THERMAL |
39 | bool "Texas Instruments OMAP5 thermal support" | 37 | bool "Texas Instruments OMAP5 thermal support" |
40 | depends on TI_SOC_THERMAL | 38 | depends on TI_SOC_THERMAL |
41 | depends on SOC_OMAP5 | 39 | depends on SOC_OMAP5 || COMPILE_TEST |
42 | help | 40 | help |
43 | If you say yes here you get thermal support for the Texas Instruments | 41 | If you say yes here you get thermal support for the Texas Instruments |
44 | OMAP5 SoC family. The current chip supported are: | 42 | OMAP5 SoC family. The current chip supported are: |
@@ -50,7 +48,7 @@ config OMAP5_THERMAL | |||
50 | config DRA752_THERMAL | 48 | config DRA752_THERMAL |
51 | bool "Texas Instruments DRA752 thermal support" | 49 | bool "Texas Instruments DRA752 thermal support" |
52 | depends on TI_SOC_THERMAL | 50 | depends on TI_SOC_THERMAL |
53 | depends on SOC_DRA7XX | 51 | depends on SOC_DRA7XX || COMPILE_TEST |
54 | help | 52 | help |
55 | If you say yes here you get thermal support for the Texas Instruments | 53 | If you say yes here you get thermal support for the Texas Instruments |
56 | DRA752 SoC family. The current chip supported are: | 54 | DRA752 SoC family. The current chip supported are: |
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index c68fe1222c16..20a41f7de76f 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c | |||
@@ -643,7 +643,7 @@ static struct pci_device_id nhi_ids[] = { | |||
643 | { | 643 | { |
644 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, | 644 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, |
645 | .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c, | 645 | .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c, |
646 | .subvendor = 0x2222, .subdevice = 0x1111, | 646 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, |
647 | }, | 647 | }, |
648 | { 0,} | 648 | { 0,} |
649 | }; | 649 | }; |
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 20932cc9c8f7..b09023b07169 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
@@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty) | |||
343 | spin_lock_irqsave(&tty->ctrl_lock, flags); | 343 | spin_lock_irqsave(&tty->ctrl_lock, flags); |
344 | tty->ctrl_status |= TIOCPKT_FLUSHREAD; | 344 | tty->ctrl_status |= TIOCPKT_FLUSHREAD; |
345 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | 345 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); |
346 | if (waitqueue_active(&tty->link->read_wait)) | 346 | wake_up_interruptible(&tty->link->read_wait); |
347 | wake_up_interruptible(&tty->link->read_wait); | ||
348 | } | 347 | } |
349 | } | 348 | } |
350 | 349 | ||
@@ -1382,8 +1381,7 @@ handle_newline: | |||
1382 | put_tty_queue(c, ldata); | 1381 | put_tty_queue(c, ldata); |
1383 | smp_store_release(&ldata->canon_head, ldata->read_head); | 1382 | smp_store_release(&ldata->canon_head, ldata->read_head); |
1384 | kill_fasync(&tty->fasync, SIGIO, POLL_IN); | 1383 | kill_fasync(&tty->fasync, SIGIO, POLL_IN); |
1385 | if (waitqueue_active(&tty->read_wait)) | 1384 | wake_up_interruptible_poll(&tty->read_wait, POLLIN); |
1386 | wake_up_interruptible_poll(&tty->read_wait, POLLIN); | ||
1387 | return 0; | 1385 | return 0; |
1388 | } | 1386 | } |
1389 | } | 1387 | } |
@@ -1667,8 +1665,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, | |||
1667 | 1665 | ||
1668 | if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) { | 1666 | if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) { |
1669 | kill_fasync(&tty->fasync, SIGIO, POLL_IN); | 1667 | kill_fasync(&tty->fasync, SIGIO, POLL_IN); |
1670 | if (waitqueue_active(&tty->read_wait)) | 1668 | wake_up_interruptible_poll(&tty->read_wait, POLLIN); |
1671 | wake_up_interruptible_poll(&tty->read_wait, POLLIN); | ||
1672 | } | 1669 | } |
1673 | } | 1670 | } |
1674 | 1671 | ||
@@ -1887,10 +1884,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) | |||
1887 | } | 1884 | } |
1888 | 1885 | ||
1889 | /* The termios change make the tty ready for I/O */ | 1886 | /* The termios change make the tty ready for I/O */ |
1890 | if (waitqueue_active(&tty->write_wait)) | 1887 | wake_up_interruptible(&tty->write_wait); |
1891 | wake_up_interruptible(&tty->write_wait); | 1888 | wake_up_interruptible(&tty->read_wait); |
1892 | if (waitqueue_active(&tty->read_wait)) | ||
1893 | wake_up_interruptible(&tty->read_wait); | ||
1894 | } | 1889 | } |
1895 | 1890 | ||
1896 | /** | 1891 | /** |
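The waitqueue_active() checks removed above were an unlocked optimization: without a memory barrier between queueing data and testing the wait queue, the test can race with a task adding itself to the queue and a wakeup can be lost. Calling the wake helper unconditionally is safe because it takes the wait-queue lock and does nothing when nobody is waiting. A sketch of the two patterns:

    /* Racy: the emptiness test can miss a sleeper that is in the middle
     * of prepare_to_wait(), so the wakeup is silently dropped.
     */
    if (waitqueue_active(&tty->read_wait))
    	wake_up_interruptible(&tty->read_wait);

    /* Safe: wake_up_interruptible() serializes on the wait-queue lock and
     * is cheap when the queue is empty, so the guard can simply be removed.
     */
    wake_up_interruptible(&tty->read_wait);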
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 54e6c8ddef5d..0bbf34035d6a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c | |||
@@ -261,6 +261,14 @@ configured less than Maximum supported fifo bytes */ | |||
261 | UART_FCR7_64BYTE, | 261 | UART_FCR7_64BYTE, |
262 | .flags = UART_CAP_FIFO, | 262 | .flags = UART_CAP_FIFO, |
263 | }, | 263 | }, |
264 | [PORT_RT2880] = { | ||
265 | .name = "Palmchip BK-3103", | ||
266 | .fifo_size = 16, | ||
267 | .tx_loadsz = 16, | ||
268 | .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, | ||
269 | .rxtrig_bytes = {1, 4, 8, 14}, | ||
270 | .flags = UART_CAP_FIFO, | ||
271 | }, | ||
264 | }; | 272 | }; |
265 | 273 | ||
266 | /* Uart divisor latch read */ | 274 | /* Uart divisor latch read */ |
@@ -2910,3 +2918,5 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) | |||
2910 | } | 2918 | } |
2911 | 2919 | ||
2912 | #endif /* CONFIG_SERIAL_8250_CONSOLE */ | 2920 | #endif /* CONFIG_SERIAL_8250_CONSOLE */ |
2921 | |||
2922 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 5ca5cf3e9359..538ea03bc101 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -2786,7 +2786,7 @@ static int atmel_serial_probe(struct platform_device *pdev) | |||
2786 | ret = atmel_init_gpios(port, &pdev->dev); | 2786 | ret = atmel_init_gpios(port, &pdev->dev); |
2787 | if (ret < 0) { | 2787 | if (ret < 0) { |
2788 | dev_err(&pdev->dev, "Failed to initialize GPIOs."); | 2788 | dev_err(&pdev->dev, "Failed to initialize GPIOs."); |
2789 | goto err; | 2789 | goto err_clear_bit; |
2790 | } | 2790 | } |
2791 | 2791 | ||
2792 | ret = atmel_init_port(port, pdev); | 2792 | ret = atmel_init_port(port, pdev); |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index fe3d41cc8416..d0388a071ba1 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -1631,12 +1631,12 @@ imx_console_write(struct console *co, const char *s, unsigned int count) | |||
1631 | int locked = 1; | 1631 | int locked = 1; |
1632 | int retval; | 1632 | int retval; |
1633 | 1633 | ||
1634 | retval = clk_prepare_enable(sport->clk_per); | 1634 | retval = clk_enable(sport->clk_per); |
1635 | if (retval) | 1635 | if (retval) |
1636 | return; | 1636 | return; |
1637 | retval = clk_prepare_enable(sport->clk_ipg); | 1637 | retval = clk_enable(sport->clk_ipg); |
1638 | if (retval) { | 1638 | if (retval) { |
1639 | clk_disable_unprepare(sport->clk_per); | 1639 | clk_disable(sport->clk_per); |
1640 | return; | 1640 | return; |
1641 | } | 1641 | } |
1642 | 1642 | ||
@@ -1675,8 +1675,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count) | |||
1675 | if (locked) | 1675 | if (locked) |
1676 | spin_unlock_irqrestore(&sport->port.lock, flags); | 1676 | spin_unlock_irqrestore(&sport->port.lock, flags); |
1677 | 1677 | ||
1678 | clk_disable_unprepare(sport->clk_ipg); | 1678 | clk_disable(sport->clk_ipg); |
1679 | clk_disable_unprepare(sport->clk_per); | 1679 | clk_disable(sport->clk_per); |
1680 | } | 1680 | } |
1681 | 1681 | ||
1682 | /* | 1682 | /* |
@@ -1777,7 +1777,15 @@ imx_console_setup(struct console *co, char *options) | |||
1777 | 1777 | ||
1778 | retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); | 1778 | retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); |
1779 | 1779 | ||
1780 | clk_disable_unprepare(sport->clk_ipg); | 1780 | clk_disable(sport->clk_ipg); |
1781 | if (retval) { | ||
1782 | clk_unprepare(sport->clk_ipg); | ||
1783 | goto error_console; | ||
1784 | } | ||
1785 | |||
1786 | retval = clk_prepare(sport->clk_per); | ||
1787 | if (retval) | ||
1788 | clk_disable_unprepare(sport->clk_ipg); | ||
1781 | 1789 | ||
1782 | error_console: | 1790 | error_console: |
1783 | return retval; | 1791 | return retval; |
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 5a3fa8913880..a660ab181cca 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c | |||
@@ -242,7 +242,10 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) | |||
242 | atomic_inc(&buf->priority); | 242 | atomic_inc(&buf->priority); |
243 | 243 | ||
244 | mutex_lock(&buf->lock); | 244 | mutex_lock(&buf->lock); |
245 | while ((next = buf->head->next) != NULL) { | 245 | /* paired w/ release in __tty_buffer_request_room; ensures there are |
246 | * no pending memory accesses to the freed buffer | ||
247 | */ | ||
248 | while ((next = smp_load_acquire(&buf->head->next)) != NULL) { | ||
246 | tty_buffer_free(port, buf->head); | 249 | tty_buffer_free(port, buf->head); |
247 | buf->head = next; | 250 | buf->head = next; |
248 | } | 251 | } |
@@ -290,7 +293,10 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size, | |||
290 | if (n != NULL) { | 293 | if (n != NULL) { |
291 | n->flags = flags; | 294 | n->flags = flags; |
292 | buf->tail = n; | 295 | buf->tail = n; |
293 | b->commit = b->used; | 296 | /* paired w/ acquire in flush_to_ldisc(); ensures |
297 | * flush_to_ldisc() sees buffer data. | ||
298 | */ | ||
299 | smp_store_release(&b->commit, b->used); | ||
294 | /* paired w/ acquire in flush_to_ldisc(); ensures the | 300 | /* paired w/ acquire in flush_to_ldisc(); ensures the |
295 | * latest commit value can be read before the head is | 301 | * latest commit value can be read before the head is |
296 | * advanced to the next buffer | 302 | * advanced to the next buffer |
@@ -393,7 +399,10 @@ void tty_schedule_flip(struct tty_port *port) | |||
393 | { | 399 | { |
394 | struct tty_bufhead *buf = &port->buf; | 400 | struct tty_bufhead *buf = &port->buf; |
395 | 401 | ||
396 | buf->tail->commit = buf->tail->used; | 402 | /* paired w/ acquire in flush_to_ldisc(); ensures |
403 | * flush_to_ldisc() sees buffer data. | ||
404 | */ | ||
405 | smp_store_release(&buf->tail->commit, buf->tail->used); | ||
397 | schedule_work(&buf->work); | 406 | schedule_work(&buf->work); |
398 | } | 407 | } |
399 | EXPORT_SYMBOL(tty_schedule_flip); | 408 | EXPORT_SYMBOL(tty_schedule_flip); |
@@ -467,7 +476,7 @@ static void flush_to_ldisc(struct work_struct *work) | |||
467 | struct tty_struct *tty; | 476 | struct tty_struct *tty; |
468 | struct tty_ldisc *disc; | 477 | struct tty_ldisc *disc; |
469 | 478 | ||
470 | tty = port->itty; | 479 | tty = READ_ONCE(port->itty); |
471 | if (tty == NULL) | 480 | if (tty == NULL) |
472 | return; | 481 | return; |
473 | 482 | ||
@@ -491,7 +500,10 @@ static void flush_to_ldisc(struct work_struct *work) | |||
491 | * is advancing to the next buffer | 500 | * is advancing to the next buffer |
492 | */ | 501 | */ |
493 | next = smp_load_acquire(&head->next); | 502 | next = smp_load_acquire(&head->next); |
494 | count = head->commit - head->read; | 503 | /* paired w/ release in __tty_buffer_request_room() or in |
504 | * tty_buffer_flush(); ensures we see the committed buffer data | ||
505 | */ | ||
506 | count = smp_load_acquire(&head->commit) - head->read; | ||
495 | if (!count) { | 507 | if (!count) { |
496 | if (next == NULL) { | 508 | if (next == NULL) { |
497 | check_other_closed(tty); | 509 | check_other_closed(tty); |
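The new smp_store_release()/smp_load_acquire() pairing on the commit field guarantees that when flush_to_ldisc() observes an advanced commit index, all buffer bytes written before the release are also visible. A minimal sketch of the producer/consumer contract, assuming one filler and one flush worker per port (the helper names are illustrative):

    /* producer side (fill path): write the data, then publish the index */
    copy_chars_into(buf_tail, chars, n);		/* illustrative helper */
    buf_tail->used += n;
    smp_store_release(&buf_tail->commit, buf_tail->used);

    /* consumer side (flush_to_ldisc): read the index, then the data */
    count = smp_load_acquire(&head->commit) - head->read;
    if (count)
    	/* every byte up to 'commit' is guaranteed visible here */
    	push_to_ldisc(head, count);			/* illustrative helper */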
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 02785d844354..2eefaa6e3e3a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -2128,8 +2128,24 @@ retry_open: | |||
2128 | if (!noctty && | 2128 | if (!noctty && |
2129 | current->signal->leader && | 2129 | current->signal->leader && |
2130 | !current->signal->tty && | 2130 | !current->signal->tty && |
2131 | tty->session == NULL) | 2131 | tty->session == NULL) { |
2132 | __proc_set_tty(tty); | 2132 | /* |
2133 | * Don't let a process that only has write access to the tty | ||
2134 | * obtain the privileges associated with having a tty as | ||
2135 | * controlling terminal (being able to reopen it with full | ||
2136 | * access through /dev/tty, being able to perform pushback). | ||
2137 | * Many distributions set the group of all ttys to "tty" and | ||
2138 | * grant write-only access to all terminals for setgid tty | ||
2139 | * binaries, which should not imply full privileges on all ttys. | ||
2140 | * | ||
2141 | * This could theoretically break old code that performs open() | ||
2142 | * on a write-only file descriptor. In that case, it might be | ||
2143 | * necessary to also permit this if | ||
2144 | * inode_permission(inode, MAY_READ) == 0. | ||
2145 | */ | ||
2146 | if (filp->f_mode & FMODE_READ) | ||
2147 | __proc_set_tty(tty); | ||
2148 | } | ||
2133 | spin_unlock_irq(¤t->sighand->siglock); | 2149 | spin_unlock_irq(¤t->sighand->siglock); |
2134 | read_unlock(&tasklist_lock); | 2150 | read_unlock(&tasklist_lock); |
2135 | tty_unlock(tty); | 2151 | tty_unlock(tty); |
@@ -2418,7 +2434,7 @@ static int fionbio(struct file *file, int __user *p) | |||
2418 | * Takes ->siglock() when updating signal->tty | 2434 | * Takes ->siglock() when updating signal->tty |
2419 | */ | 2435 | */ |
2420 | 2436 | ||
2421 | static int tiocsctty(struct tty_struct *tty, int arg) | 2437 | static int tiocsctty(struct tty_struct *tty, struct file *file, int arg) |
2422 | { | 2438 | { |
2423 | int ret = 0; | 2439 | int ret = 0; |
2424 | 2440 | ||
@@ -2452,6 +2468,13 @@ static int tiocsctty(struct tty_struct *tty, int arg) | |||
2452 | goto unlock; | 2468 | goto unlock; |
2453 | } | 2469 | } |
2454 | } | 2470 | } |
2471 | |||
2472 | /* See the comment in tty_open(). */ | ||
2473 | if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) { | ||
2474 | ret = -EPERM; | ||
2475 | goto unlock; | ||
2476 | } | ||
2477 | |||
2455 | proc_set_tty(tty); | 2478 | proc_set_tty(tty); |
2456 | unlock: | 2479 | unlock: |
2457 | read_unlock(&tasklist_lock); | 2480 | read_unlock(&tasklist_lock); |
@@ -2844,7 +2867,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
2844 | no_tty(); | 2867 | no_tty(); |
2845 | return 0; | 2868 | return 0; |
2846 | case TIOCSCTTY: | 2869 | case TIOCSCTTY: |
2847 | return tiocsctty(tty, arg); | 2870 | return tiocsctty(tty, file, arg); |
2848 | case TIOCGPGRP: | 2871 | case TIOCGPGRP: |
2849 | return tiocgpgrp(tty, real_tty, p); | 2872 | return tiocgpgrp(tty, real_tty, p); |
2850 | case TIOCSPGRP: | 2873 | case TIOCSPGRP: |
@@ -3151,13 +3174,18 @@ struct class *tty_class; | |||
3151 | static int tty_cdev_add(struct tty_driver *driver, dev_t dev, | 3174 | static int tty_cdev_add(struct tty_driver *driver, dev_t dev, |
3152 | unsigned int index, unsigned int count) | 3175 | unsigned int index, unsigned int count) |
3153 | { | 3176 | { |
3177 | int err; | ||
3178 | |||
3154 | /* init here, since reused cdevs cause crashes */ | 3179 | /* init here, since reused cdevs cause crashes */ |
3155 | driver->cdevs[index] = cdev_alloc(); | 3180 | driver->cdevs[index] = cdev_alloc(); |
3156 | if (!driver->cdevs[index]) | 3181 | if (!driver->cdevs[index]) |
3157 | return -ENOMEM; | 3182 | return -ENOMEM; |
3158 | cdev_init(driver->cdevs[index], &tty_fops); | 3183 | driver->cdevs[index]->ops = &tty_fops; |
3159 | driver->cdevs[index]->owner = driver->owner; | 3184 | driver->cdevs[index]->owner = driver->owner; |
3160 | return cdev_add(driver->cdevs[index], dev, count); | 3185 | err = cdev_add(driver->cdevs[index], dev, count); |
3186 | if (err) | ||
3187 | kobject_put(&driver->cdevs[index]->kobj); | ||
3188 | return err; | ||
3161 | } | 3189 | } |
3162 | 3190 | ||
3163 | /** | 3191 | /** |
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 867e9f3f3859..dcc50c878159 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c | |||
@@ -61,7 +61,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = { | |||
61 | { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data}, | 61 | { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data}, |
62 | { .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data}, | 62 | { .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data}, |
63 | { .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data}, | 63 | { .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data}, |
64 | { .compatible = "fsl,imx6sx-usb", .data = &imx6sl_usb_data}, | 64 | { .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data}, |
65 | { /* sentinel */ } | 65 | { /* sentinel */ } |
66 | }; | 66 | }; |
67 | MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids); | 67 | MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids); |
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c index 9eae1a16cef9..4456d2cf80ff 100644 --- a/drivers/usb/chipidea/ci_hdrc_usb2.c +++ b/drivers/usb/chipidea/ci_hdrc_usb2.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/dma-mapping.h> | 12 | #include <linux/dma-mapping.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/of.h> | 14 | #include <linux/of.h> |
15 | #include <linux/of_platform.h> | ||
15 | #include <linux/phy/phy.h> | 16 | #include <linux/phy/phy.h> |
16 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
17 | #include <linux/usb/chipidea.h> | 18 | #include <linux/usb/chipidea.h> |
@@ -30,18 +31,36 @@ static const struct ci_hdrc_platform_data ci_default_pdata = { | |||
30 | .flags = CI_HDRC_DISABLE_STREAMING, | 31 | .flags = CI_HDRC_DISABLE_STREAMING, |
31 | }; | 32 | }; |
32 | 33 | ||
34 | static struct ci_hdrc_platform_data ci_zynq_pdata = { | ||
35 | .capoffset = DEF_CAPOFFSET, | ||
36 | }; | ||
37 | |||
38 | static const struct of_device_id ci_hdrc_usb2_of_match[] = { | ||
39 | { .compatible = "chipidea,usb2"}, | ||
40 | { .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata}, | ||
41 | { } | ||
42 | }; | ||
43 | MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match); | ||
44 | |||
33 | static int ci_hdrc_usb2_probe(struct platform_device *pdev) | 45 | static int ci_hdrc_usb2_probe(struct platform_device *pdev) |
34 | { | 46 | { |
35 | struct device *dev = &pdev->dev; | 47 | struct device *dev = &pdev->dev; |
36 | struct ci_hdrc_usb2_priv *priv; | 48 | struct ci_hdrc_usb2_priv *priv; |
37 | struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev); | 49 | struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev); |
38 | int ret; | 50 | int ret; |
51 | const struct of_device_id *match; | ||
39 | 52 | ||
40 | if (!ci_pdata) { | 53 | if (!ci_pdata) { |
41 | ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL); | 54 | ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL); |
42 | *ci_pdata = ci_default_pdata; /* struct copy */ | 55 | *ci_pdata = ci_default_pdata; /* struct copy */ |
43 | } | 56 | } |
44 | 57 | ||
58 | match = of_match_device(ci_hdrc_usb2_of_match, &pdev->dev); | ||
59 | if (match && match->data) { | ||
60 | /* struct copy */ | ||
61 | *ci_pdata = *(struct ci_hdrc_platform_data *)match->data; | ||
62 | } | ||
63 | |||
45 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | 64 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); |
46 | if (!priv) | 65 | if (!priv) |
47 | return -ENOMEM; | 66 | return -ENOMEM; |
@@ -96,12 +115,6 @@ static int ci_hdrc_usb2_remove(struct platform_device *pdev) | |||
96 | return 0; | 115 | return 0; |
97 | } | 116 | } |
98 | 117 | ||
99 | static const struct of_device_id ci_hdrc_usb2_of_match[] = { | ||
100 | { .compatible = "chipidea,usb2" }, | ||
101 | { } | ||
102 | }; | ||
103 | MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match); | ||
104 | |||
105 | static struct platform_driver ci_hdrc_usb2_driver = { | 118 | static struct platform_driver ci_hdrc_usb2_driver = { |
106 | .probe = ci_hdrc_usb2_probe, | 119 | .probe = ci_hdrc_usb2_probe, |
107 | .remove = ci_hdrc_usb2_remove, | 120 | .remove = ci_hdrc_usb2_remove, |
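Moving the of_device_id table above probe() lets the Zynq entry carry compatible-specific platform data through the standard .data pointer, which probe() copies over the defaults. A sketch of the lookup, with a hypothetical extra compatible added purely for illustration:

    static const struct ci_hdrc_platform_data ci_foo_pdata = {	/* hypothetical */
    	.capoffset = DEF_CAPOFFSET,
    };

    static const struct of_device_id ci_hdrc_usb2_of_match[] = {
    	{ .compatible = "chipidea,usb2" },
    	{ .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata },
    	{ .compatible = "acme,foo-usb", .data = &ci_foo_pdata },	/* hypothetical */
    	{ }
    };

    /* in probe(): override the defaults when the matched entry carries data */
    match = of_match_device(ci_hdrc_usb2_of_match, &pdev->dev);
    if (match && match->data)
    	*ci_pdata = *(struct ci_hdrc_platform_data *)match->data;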
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index a637da25dda0..8223fe73ea85 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
@@ -656,6 +656,44 @@ __acquires(hwep->lock) | |||
656 | return 0; | 656 | return 0; |
657 | } | 657 | } |
658 | 658 | ||
659 | static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer) | ||
660 | { | ||
661 | struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); | ||
662 | int direction, retval = 0; | ||
663 | unsigned long flags; | ||
664 | |||
665 | if (ep == NULL || hwep->ep.desc == NULL) | ||
666 | return -EINVAL; | ||
667 | |||
668 | if (usb_endpoint_xfer_isoc(hwep->ep.desc)) | ||
669 | return -EOPNOTSUPP; | ||
670 | |||
671 | spin_lock_irqsave(hwep->lock, flags); | ||
672 | |||
673 | if (value && hwep->dir == TX && check_transfer && | ||
674 | !list_empty(&hwep->qh.queue) && | ||
675 | !usb_endpoint_xfer_control(hwep->ep.desc)) { | ||
676 | spin_unlock_irqrestore(hwep->lock, flags); | ||
677 | return -EAGAIN; | ||
678 | } | ||
679 | |||
680 | direction = hwep->dir; | ||
681 | do { | ||
682 | retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value); | ||
683 | |||
684 | if (!value) | ||
685 | hwep->wedge = 0; | ||
686 | |||
687 | if (hwep->type == USB_ENDPOINT_XFER_CONTROL) | ||
688 | hwep->dir = (hwep->dir == TX) ? RX : TX; | ||
689 | |||
690 | } while (hwep->dir != direction); | ||
691 | |||
692 | spin_unlock_irqrestore(hwep->lock, flags); | ||
693 | return retval; | ||
694 | } | ||
695 | |||
696 | |||
659 | /** | 697 | /** |
660 | * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts | 698 | * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts |
661 | * @gadget: gadget | 699 | * @gadget: gadget |
@@ -1051,7 +1089,7 @@ __acquires(ci->lock) | |||
1051 | num += ci->hw_ep_max / 2; | 1089 | num += ci->hw_ep_max / 2; |
1052 | 1090 | ||
1053 | spin_unlock(&ci->lock); | 1091 | spin_unlock(&ci->lock); |
1054 | err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep); | 1092 | err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false); |
1055 | spin_lock(&ci->lock); | 1093 | spin_lock(&ci->lock); |
1056 | if (!err) | 1094 | if (!err) |
1057 | isr_setup_status_phase(ci); | 1095 | isr_setup_status_phase(ci); |
@@ -1117,8 +1155,8 @@ delegate: | |||
1117 | 1155 | ||
1118 | if (err < 0) { | 1156 | if (err < 0) { |
1119 | spin_unlock(&ci->lock); | 1157 | spin_unlock(&ci->lock); |
1120 | if (usb_ep_set_halt(&hwep->ep)) | 1158 | if (_ep_set_halt(&hwep->ep, 1, false)) |
1121 | dev_err(ci->dev, "error: ep_set_halt\n"); | 1159 | dev_err(ci->dev, "error: _ep_set_halt\n"); |
1122 | spin_lock(&ci->lock); | 1160 | spin_lock(&ci->lock); |
1123 | } | 1161 | } |
1124 | } | 1162 | } |
@@ -1149,9 +1187,9 @@ __acquires(ci->lock) | |||
1149 | err = isr_setup_status_phase(ci); | 1187 | err = isr_setup_status_phase(ci); |
1150 | if (err < 0) { | 1188 | if (err < 0) { |
1151 | spin_unlock(&ci->lock); | 1189 | spin_unlock(&ci->lock); |
1152 | if (usb_ep_set_halt(&hwep->ep)) | 1190 | if (_ep_set_halt(&hwep->ep, 1, false)) |
1153 | dev_err(ci->dev, | 1191 | dev_err(ci->dev, |
1154 | "error: ep_set_halt\n"); | 1192 | "error: _ep_set_halt\n"); |
1155 | spin_lock(&ci->lock); | 1193 | spin_lock(&ci->lock); |
1156 | } | 1194 | } |
1157 | } | 1195 | } |
@@ -1397,41 +1435,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) | |||
1397 | */ | 1435 | */ |
1398 | static int ep_set_halt(struct usb_ep *ep, int value) | 1436 | static int ep_set_halt(struct usb_ep *ep, int value) |
1399 | { | 1437 | { |
1400 | struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); | 1438 | return _ep_set_halt(ep, value, true); |
1401 | int direction, retval = 0; | ||
1402 | unsigned long flags; | ||
1403 | |||
1404 | if (ep == NULL || hwep->ep.desc == NULL) | ||
1405 | return -EINVAL; | ||
1406 | |||
1407 | if (usb_endpoint_xfer_isoc(hwep->ep.desc)) | ||
1408 | return -EOPNOTSUPP; | ||
1409 | |||
1410 | spin_lock_irqsave(hwep->lock, flags); | ||
1411 | |||
1412 | #ifndef STALL_IN | ||
1413 | /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */ | ||
1414 | if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX && | ||
1415 | !list_empty(&hwep->qh.queue)) { | ||
1416 | spin_unlock_irqrestore(hwep->lock, flags); | ||
1417 | return -EAGAIN; | ||
1418 | } | ||
1419 | #endif | ||
1420 | |||
1421 | direction = hwep->dir; | ||
1422 | do { | ||
1423 | retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value); | ||
1424 | |||
1425 | if (!value) | ||
1426 | hwep->wedge = 0; | ||
1427 | |||
1428 | if (hwep->type == USB_ENDPOINT_XFER_CONTROL) | ||
1429 | hwep->dir = (hwep->dir == TX) ? RX : TX; | ||
1430 | |||
1431 | } while (hwep->dir != direction); | ||
1432 | |||
1433 | spin_unlock_irqrestore(hwep->lock, flags); | ||
1434 | return retval; | ||
1435 | } | 1439 | } |
1436 | 1440 | ||
1437 | /** | 1441 | /** |
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index b2a540b43f97..b9ddf0c1ffe5 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
@@ -112,7 +112,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, | |||
112 | cfgno, inum, asnum, ep->desc.bEndpointAddress); | 112 | cfgno, inum, asnum, ep->desc.bEndpointAddress); |
113 | ep->ss_ep_comp.bmAttributes = 16; | 113 | ep->ss_ep_comp.bmAttributes = 16; |
114 | } else if (usb_endpoint_xfer_isoc(&ep->desc) && | 114 | } else if (usb_endpoint_xfer_isoc(&ep->desc) && |
115 | desc->bmAttributes > 2) { | 115 | USB_SS_MULT(desc->bmAttributes) > 3) { |
116 | dev_warn(ddev, "Isoc endpoint has Mult of %d in " | 116 | dev_warn(ddev, "Isoc endpoint has Mult of %d in " |
117 | "config %d interface %d altsetting %d ep %d: " | 117 | "config %d interface %d altsetting %d ep %d: " |
118 | "setting to 3\n", desc->bmAttributes + 1, | 118 | "setting to 3\n", desc->bmAttributes + 1, |
@@ -121,7 +121,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, | |||
121 | } | 121 | } |
122 | 122 | ||
123 | if (usb_endpoint_xfer_isoc(&ep->desc)) | 123 | if (usb_endpoint_xfer_isoc(&ep->desc)) |
124 | max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) * | 124 | max_tx = (desc->bMaxBurst + 1) * |
125 | (USB_SS_MULT(desc->bmAttributes)) * | ||
125 | usb_endpoint_maxp(&ep->desc); | 126 | usb_endpoint_maxp(&ep->desc); |
126 | else if (usb_endpoint_xfer_int(&ep->desc)) | 127 | else if (usb_endpoint_xfer_int(&ep->desc)) |
127 | max_tx = usb_endpoint_maxp(&ep->desc) * | 128 | max_tx = usb_endpoint_maxp(&ep->desc) * |
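The bandwidth check now decodes the Mult field with USB_SS_MULT() instead of using the raw bmAttributes byte, so companion descriptors that set reserved bits no longer inflate the calculation. A worked example, assuming USB_SS_MULT(p) expands to (1 + ((p) & 0x3)):

    /* Isochronous SuperSpeed endpoint, illustrative descriptor values:
     *   bMaxBurst      = 1    -> 2 packets per burst
     *   bmAttributes   = 0x02 -> USB_SS_MULT(0x02) = 1 + (0x02 & 0x3) = 3
     *   wMaxPacketSize = 1024
     */
    max_tx = (desc->bMaxBurst + 1) * USB_SS_MULT(desc->bmAttributes) *
    	 usb_endpoint_maxp(&ep->desc);
    /* = 2 * 3 * 1024 = 6144 bytes per service interval */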
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index d85abfed84cc..f5a381945db2 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
54 | { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, | 54 | { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, |
55 | { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, | 55 | { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, |
56 | 56 | ||
57 | /* Logitech ConferenceCam CC3000e */ | ||
58 | { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, | ||
59 | { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT }, | ||
60 | |||
61 | /* Logitech PTZ Pro Camera */ | ||
62 | { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT }, | ||
63 | |||
57 | /* Logitech Quickcam Fusion */ | 64 | /* Logitech Quickcam Fusion */ |
58 | { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, | 65 | { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, |
59 | 66 | ||
@@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
78 | /* Philips PSC805 audio device */ | 85 | /* Philips PSC805 audio device */ |
79 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, | 86 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, |
80 | 87 | ||
88 | /* Plantronic Audio 655 DSP */ | ||
89 | { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
90 | |||
91 | /* Plantronic Audio 648 USB */ | ||
92 | { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
93 | |||
81 | /* Artisman Watchdog Dongle */ | 94 | /* Artisman Watchdog Dongle */ |
82 | { USB_DEVICE(0x04b4, 0x0526), .driver_info = | 95 | { USB_DEVICE(0x04b4, 0x0526), .driver_info = |
83 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 96 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index a5a1b7c45743..22e9606d8e08 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c | |||
@@ -514,8 +514,6 @@ static int dwc3_omap_probe(struct platform_device *pdev) | |||
514 | goto err1; | 514 | goto err1; |
515 | } | 515 | } |
516 | 516 | ||
517 | dwc3_omap_enable_irqs(omap); | ||
518 | |||
519 | ret = dwc3_omap_extcon_register(omap); | 517 | ret = dwc3_omap_extcon_register(omap); |
520 | if (ret < 0) | 518 | if (ret < 0) |
521 | goto err2; | 519 | goto err2; |
@@ -526,6 +524,8 @@ static int dwc3_omap_probe(struct platform_device *pdev) | |||
526 | goto err3; | 524 | goto err3; |
527 | } | 525 | } |
528 | 526 | ||
527 | dwc3_omap_enable_irqs(omap); | ||
528 | |||
529 | return 0; | 529 | return 0; |
530 | 530 | ||
531 | err3: | 531 | err3: |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 0c25704dcb6b..1e8bdf817811 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -2665,8 +2665,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc) | |||
2665 | int i; | 2665 | int i; |
2666 | irqreturn_t ret = IRQ_NONE; | 2666 | irqreturn_t ret = IRQ_NONE; |
2667 | 2667 | ||
2668 | spin_lock(&dwc->lock); | ||
2669 | |||
2670 | for (i = 0; i < dwc->num_event_buffers; i++) { | 2668 | for (i = 0; i < dwc->num_event_buffers; i++) { |
2671 | irqreturn_t status; | 2669 | irqreturn_t status; |
2672 | 2670 | ||
@@ -2675,8 +2673,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc) | |||
2675 | ret = status; | 2673 | ret = status; |
2676 | } | 2674 | } |
2677 | 2675 | ||
2678 | spin_unlock(&dwc->lock); | ||
2679 | |||
2680 | return ret; | 2676 | return ret; |
2681 | } | 2677 | } |
2682 | 2678 | ||
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c index 978435a51038..6399c106a3a5 100644 --- a/drivers/usb/gadget/epautoconf.c +++ b/drivers/usb/gadget/epautoconf.c | |||
@@ -186,6 +186,7 @@ void usb_ep_autoconfig_reset (struct usb_gadget *gadget) | |||
186 | 186 | ||
187 | list_for_each_entry (ep, &gadget->ep_list, ep_list) { | 187 | list_for_each_entry (ep, &gadget->ep_list, ep_list) { |
188 | ep->claimed = false; | 188 | ep->claimed = false; |
189 | ep->driver_data = NULL; | ||
189 | } | 190 | } |
190 | gadget->in_epnum = 0; | 191 | gadget->in_epnum = 0; |
191 | gadget->out_epnum = 0; | 192 | gadget->out_epnum = 0; |
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c index fdacddb18c00..175ca93fe5e2 100644 --- a/drivers/usb/gadget/udc/amd5536udc.c +++ b/drivers/usb/gadget/udc/amd5536udc.c | |||
@@ -3138,8 +3138,8 @@ static void udc_pci_remove(struct pci_dev *pdev) | |||
3138 | writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg); | 3138 | writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg); |
3139 | if (dev->irq_registered) | 3139 | if (dev->irq_registered) |
3140 | free_irq(pdev->irq, dev); | 3140 | free_irq(pdev->irq, dev); |
3141 | if (dev->regs) | 3141 | if (dev->virt_addr) |
3142 | iounmap(dev->regs); | 3142 | iounmap(dev->virt_addr); |
3143 | if (dev->mem_region) | 3143 | if (dev->mem_region) |
3144 | release_mem_region(pci_resource_start(pdev, 0), | 3144 | release_mem_region(pci_resource_start(pdev, 0), |
3145 | pci_resource_len(pdev, 0)); | 3145 | pci_resource_len(pdev, 0)); |
@@ -3226,17 +3226,13 @@ static int udc_pci_probe( | |||
3226 | 3226 | ||
3227 | /* init */ | 3227 | /* init */ |
3228 | dev = kzalloc(sizeof(struct udc), GFP_KERNEL); | 3228 | dev = kzalloc(sizeof(struct udc), GFP_KERNEL); |
3229 | if (!dev) { | 3229 | if (!dev) |
3230 | retval = -ENOMEM; | 3230 | return -ENOMEM; |
3231 | goto finished; | ||
3232 | } | ||
3233 | 3231 | ||
3234 | /* pci setup */ | 3232 | /* pci setup */ |
3235 | if (pci_enable_device(pdev) < 0) { | 3233 | if (pci_enable_device(pdev) < 0) { |
3236 | kfree(dev); | ||
3237 | dev = NULL; | ||
3238 | retval = -ENODEV; | 3234 | retval = -ENODEV; |
3239 | goto finished; | 3235 | goto err_pcidev; |
3240 | } | 3236 | } |
3241 | dev->active = 1; | 3237 | dev->active = 1; |
3242 | 3238 | ||
@@ -3246,28 +3242,22 @@ static int udc_pci_probe( | |||
3246 | 3242 | ||
3247 | if (!request_mem_region(resource, len, name)) { | 3243 | if (!request_mem_region(resource, len, name)) { |
3248 | dev_dbg(&pdev->dev, "pci device used already\n"); | 3244 | dev_dbg(&pdev->dev, "pci device used already\n"); |
3249 | kfree(dev); | ||
3250 | dev = NULL; | ||
3251 | retval = -EBUSY; | 3245 | retval = -EBUSY; |
3252 | goto finished; | 3246 | goto err_memreg; |
3253 | } | 3247 | } |
3254 | dev->mem_region = 1; | 3248 | dev->mem_region = 1; |
3255 | 3249 | ||
3256 | dev->virt_addr = ioremap_nocache(resource, len); | 3250 | dev->virt_addr = ioremap_nocache(resource, len); |
3257 | if (dev->virt_addr == NULL) { | 3251 | if (dev->virt_addr == NULL) { |
3258 | dev_dbg(&pdev->dev, "start address cannot be mapped\n"); | 3252 | dev_dbg(&pdev->dev, "start address cannot be mapped\n"); |
3259 | kfree(dev); | ||
3260 | dev = NULL; | ||
3261 | retval = -EFAULT; | 3253 | retval = -EFAULT; |
3262 | goto finished; | 3254 | goto err_ioremap; |
3263 | } | 3255 | } |
3264 | 3256 | ||
3265 | if (!pdev->irq) { | 3257 | if (!pdev->irq) { |
3266 | dev_err(&pdev->dev, "irq not set\n"); | 3258 | dev_err(&pdev->dev, "irq not set\n"); |
3267 | kfree(dev); | ||
3268 | dev = NULL; | ||
3269 | retval = -ENODEV; | 3259 | retval = -ENODEV; |
3270 | goto finished; | 3260 | goto err_irq; |
3271 | } | 3261 | } |
3272 | 3262 | ||
3273 | spin_lock_init(&dev->lock); | 3263 | spin_lock_init(&dev->lock); |
@@ -3283,10 +3273,8 @@ static int udc_pci_probe( | |||
3283 | 3273 | ||
3284 | if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) { | 3274 | if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) { |
3285 | dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq); | 3275 | dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq); |
3286 | kfree(dev); | ||
3287 | dev = NULL; | ||
3288 | retval = -EBUSY; | 3276 | retval = -EBUSY; |
3289 | goto finished; | 3277 | goto err_irq; |
3290 | } | 3278 | } |
3291 | dev->irq_registered = 1; | 3279 | dev->irq_registered = 1; |
3292 | 3280 | ||
@@ -3314,8 +3302,17 @@ static int udc_pci_probe( | |||
3314 | return 0; | 3302 | return 0; |
3315 | 3303 | ||
3316 | finished: | 3304 | finished: |
3317 | if (dev) | 3305 | udc_pci_remove(pdev); |
3318 | udc_pci_remove(pdev); | 3306 | return retval; |
3307 | |||
3308 | err_irq: | ||
3309 | iounmap(dev->virt_addr); | ||
3310 | err_ioremap: | ||
3311 | release_mem_region(resource, len); | ||
3312 | err_memreg: | ||
3313 | pci_disable_device(pdev); | ||
3314 | err_pcidev: | ||
3315 | kfree(dev); | ||
3319 | return retval; | 3316 | return retval; |
3320 | } | 3317 | } |
3321 | 3318 | ||
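The probe rewrite replaces the single catch-all error path with the usual layered goto unwind, where each label releases exactly the resources acquired before the failure, in reverse order. A minimal sketch of the idiom with hypothetical helpers:

    static int sketch_probe(void)		/* names are illustrative only */
    {
    	int ret;

    	ret = grab_first();
    	if (ret)
    		return ret;		/* nothing to undo yet */

    	ret = grab_second();
    	if (ret)
    		goto err_first;

    	ret = grab_third();
    	if (ret)
    		goto err_second;

    	return 0;

    err_second:
    	put_second();
    err_first:
    	put_first();
    	return ret;
    }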
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 3dfada8d6061..f0f2b066ac08 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c | |||
@@ -2002,6 +2002,17 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, | |||
2002 | ep->udc = udc; | 2002 | ep->udc = udc; |
2003 | INIT_LIST_HEAD(&ep->queue); | 2003 | INIT_LIST_HEAD(&ep->queue); |
2004 | 2004 | ||
2005 | if (ep->index == 0) { | ||
2006 | ep->ep.caps.type_control = true; | ||
2007 | } else { | ||
2008 | ep->ep.caps.type_iso = ep->can_isoc; | ||
2009 | ep->ep.caps.type_bulk = true; | ||
2010 | ep->ep.caps.type_int = true; | ||
2011 | } | ||
2012 | |||
2013 | ep->ep.caps.dir_in = true; | ||
2014 | ep->ep.caps.dir_out = true; | ||
2015 | |||
2005 | if (i) | 2016 | if (i) |
2006 | list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); | 2017 | list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); |
2007 | 2018 | ||
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index 5c8f4effb62a..ccb9c213cc9f 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c | |||
@@ -324,8 +324,7 @@ static void bdc_mem_free(struct bdc *bdc) | |||
324 | bdc->scratchpad.buff, bdc->scratchpad.sp_dma); | 324 | bdc->scratchpad.buff, bdc->scratchpad.sp_dma); |
325 | 325 | ||
326 | /* Destroy the dma pools */ | 326 | /* Destroy the dma pools */ |
327 | if (bdc->bd_table_pool) | 327 | dma_pool_destroy(bdc->bd_table_pool); |
328 | dma_pool_destroy(bdc->bd_table_pool); | ||
329 | 328 | ||
330 | /* Free the bdc_ep array */ | 329 | /* Free the bdc_ep array */ |
331 | kfree(bdc->bdc_ep_array); | 330 | kfree(bdc->bdc_ep_array); |
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index d1b81539d632..d6199507f861 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c | |||
@@ -159,8 +159,10 @@ static int ep_bd_list_alloc(struct bdc_ep *ep) | |||
159 | bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool, | 159 | bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool, |
160 | GFP_ATOMIC, | 160 | GFP_ATOMIC, |
161 | &dma); | 161 | &dma); |
162 | if (!bd_table->start_bd) | 162 | if (!bd_table->start_bd) { |
163 | kfree(bd_table); | ||
163 | goto fail; | 164 | goto fail; |
165 | } | ||
164 | 166 | ||
165 | bd_table->dma = dma; | 167 | bd_table->dma = dma; |
166 | 168 | ||
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 1379ad40d864..27af0f008b57 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c | |||
@@ -1348,6 +1348,7 @@ static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb, | |||
1348 | { | 1348 | { |
1349 | struct dummy *dum = dum_hcd->dum; | 1349 | struct dummy *dum = dum_hcd->dum; |
1350 | struct dummy_request *req; | 1350 | struct dummy_request *req; |
1351 | int sent = 0; | ||
1351 | 1352 | ||
1352 | top: | 1353 | top: |
1353 | /* if there's no request queued, the device is NAKing; return */ | 1354 | /* if there's no request queued, the device is NAKing; return */ |
@@ -1385,12 +1386,15 @@ top: | |||
1385 | if (len == 0) | 1386 | if (len == 0) |
1386 | break; | 1387 | break; |
1387 | 1388 | ||
1388 | /* use an extra pass for the final short packet */ | 1389 | /* send multiple of maxpacket first, then remainder */ |
1389 | if (len > ep->ep.maxpacket) { | 1390 | if (len >= ep->ep.maxpacket) { |
1390 | rescan = 1; | 1391 | is_short = 0; |
1391 | len -= (len % ep->ep.maxpacket); | 1392 | if (len % ep->ep.maxpacket) |
1393 | rescan = 1; | ||
1394 | len -= len % ep->ep.maxpacket; | ||
1395 | } else { | ||
1396 | is_short = 1; | ||
1392 | } | 1397 | } |
1393 | is_short = (len % ep->ep.maxpacket) != 0; | ||
1394 | 1398 | ||
1395 | len = dummy_perform_transfer(urb, req, len); | 1399 | len = dummy_perform_transfer(urb, req, len); |
1396 | 1400 | ||
@@ -1399,6 +1403,7 @@ top: | |||
1399 | req->req.status = len; | 1403 | req->req.status = len; |
1400 | } else { | 1404 | } else { |
1401 | limit -= len; | 1405 | limit -= len; |
1406 | sent += len; | ||
1402 | urb->actual_length += len; | 1407 | urb->actual_length += len; |
1403 | req->req.actual += len; | 1408 | req->req.actual += len; |
1404 | } | 1409 | } |
@@ -1421,7 +1426,7 @@ top: | |||
1421 | *status = -EOVERFLOW; | 1426 | *status = -EOVERFLOW; |
1422 | else | 1427 | else |
1423 | *status = 0; | 1428 | *status = 0; |
1424 | } else if (!to_host) { | 1429 | } else { |
1425 | *status = 0; | 1430 | *status = 0; |
1426 | if (host_len > dev_len) | 1431 | if (host_len > dev_len) |
1427 | req->req.status = -EOVERFLOW; | 1432 | req->req.status = -EOVERFLOW; |
@@ -1429,15 +1434,24 @@ top: | |||
1429 | req->req.status = 0; | 1434 | req->req.status = 0; |
1430 | } | 1435 | } |
1431 | 1436 | ||
1432 | /* many requests terminate without a short packet */ | 1437 | /* |
1438 | * many requests terminate without a short packet. | ||
1439 | * send a zlp if demanded by flags. | ||
1440 | */ | ||
1433 | } else { | 1441 | } else { |
1434 | if (req->req.length == req->req.actual | 1442 | if (req->req.length == req->req.actual) { |
1435 | && !req->req.zero) | 1443 | if (req->req.zero && to_host) |
1436 | req->req.status = 0; | 1444 | rescan = 1; |
1437 | if (urb->transfer_buffer_length == urb->actual_length | 1445 | else |
1438 | && !(urb->transfer_flags | 1446 | req->req.status = 0; |
1439 | & URB_ZERO_PACKET)) | 1447 | } |
1440 | *status = 0; | 1448 | if (urb->transfer_buffer_length == urb->actual_length) { |
1449 | if (urb->transfer_flags & URB_ZERO_PACKET && | ||
1450 | !to_host) | ||
1451 | rescan = 1; | ||
1452 | else | ||
1453 | *status = 0; | ||
1454 | } | ||
1441 | } | 1455 | } |
1442 | 1456 | ||
1443 | /* device side completion --> continuable */ | 1457 | /* device side completion --> continuable */ |
@@ -1460,7 +1474,7 @@ top: | |||
1460 | if (rescan) | 1474 | if (rescan) |
1461 | goto top; | 1475 | goto top; |
1462 | } | 1476 | } |
1463 | return limit; | 1477 | return sent; |
1464 | } | 1478 | } |
1465 | 1479 | ||
1466 | static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep) | 1480 | static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep) |
@@ -1890,7 +1904,7 @@ restart: | |||
1890 | default: | 1904 | default: |
1891 | treat_control_like_bulk: | 1905 | treat_control_like_bulk: |
1892 | ep->last_io = jiffies; | 1906 | ep->last_io = jiffies; |
1893 | total = transfer(dum_hcd, urb, ep, limit, &status); | 1907 | total -= transfer(dum_hcd, urb, ep, limit, &status); |
1894 | break; | 1908 | break; |
1895 | } | 1909 | } |
1896 | 1910 | ||
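The rewritten dummy_hcd loop above sends whole multiples of maxpacket first and treats only the final sub-maxpacket pass as short, rather than deriving is_short after trimming the length. A hedged sketch of that split as a standalone helper (not the driver's code; rescan is only ever set here, as in the driver's loop):

#include <stdbool.h>

static unsigned int split_len(unsigned int len, unsigned int maxpacket,
			      bool *is_short, bool *rescan)
{
	if (len >= maxpacket) {
		*is_short = false;
		if (len % maxpacket)
			*rescan = true;	/* remainder goes in a later pass */
		len -= len % maxpacket;
	} else {
		*is_short = true;
	}
	return len;
}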
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 8aa2593c2c36..b9429bc42511 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c | |||
@@ -2117,8 +2117,7 @@ static int gr_remove(struct platform_device *pdev) | |||
2117 | return -EBUSY; | 2117 | return -EBUSY; |
2118 | 2118 | ||
2119 | gr_dfs_delete(dev); | 2119 | gr_dfs_delete(dev); |
2120 | if (dev->desc_pool) | 2120 | dma_pool_destroy(dev->desc_pool); |
2121 | dma_pool_destroy(dev->desc_pool); | ||
2122 | platform_set_drvdata(pdev, NULL); | 2121 | platform_set_drvdata(pdev, NULL); |
2123 | 2122 | ||
2124 | gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req); | 2123 | gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req); |
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c index 4c489692745e..dafe74eb9ade 100644 --- a/drivers/usb/gadget/udc/mv_u3d_core.c +++ b/drivers/usb/gadget/udc/mv_u3d_core.c | |||
@@ -1767,8 +1767,7 @@ static int mv_u3d_remove(struct platform_device *dev) | |||
1767 | usb_del_gadget_udc(&u3d->gadget); | 1767 | usb_del_gadget_udc(&u3d->gadget); |
1768 | 1768 | ||
1769 | /* free memory allocated in probe */ | 1769 | /* free memory allocated in probe */ |
1770 | if (u3d->trb_pool) | 1770 | dma_pool_destroy(u3d->trb_pool); |
1771 | dma_pool_destroy(u3d->trb_pool); | ||
1772 | 1771 | ||
1773 | if (u3d->ep_context) | 1772 | if (u3d->ep_context) |
1774 | dma_free_coherent(&dev->dev, u3d->ep_context_size, | 1773 | dma_free_coherent(&dev->dev, u3d->ep_context_size, |
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index 339af51df57d..81b6229c7805 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c | |||
@@ -2100,8 +2100,7 @@ static int mv_udc_remove(struct platform_device *pdev) | |||
2100 | } | 2100 | } |
2101 | 2101 | ||
2102 | /* free memory allocated in probe */ | 2102 | /* free memory allocated in probe */ |
2103 | if (udc->dtd_pool) | 2103 | dma_pool_destroy(udc->dtd_pool); |
2104 | dma_pool_destroy(udc->dtd_pool); | ||
2105 | 2104 | ||
2106 | if (udc->ep_dqh) | 2105 | if (udc->ep_dqh) |
2107 | dma_free_coherent(&pdev->dev, udc->ep_dqh_size, | 2106 | dma_free_coherent(&pdev->dev, udc->ep_dqh_size, |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 9a8c936cd42c..41f841fa6c4d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -1498,10 +1498,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
1498 | * use Event Data TRBs, and we don't chain in a link TRB on short | 1498 | * use Event Data TRBs, and we don't chain in a link TRB on short |
1499 | * transfers, we're basically dividing by 1. | 1499 | * transfers, we're basically dividing by 1. |
1500 | * | 1500 | * |
1501 | * xHCI 1.0 specification indicates that the Average TRB Length should | 1501 | * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length |
1502 | * be set to 8 for control endpoints. | 1502 | * should be set to 8 for control endpoints. |
1503 | */ | 1503 | */ |
1504 | if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100) | 1504 | if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100) |
1505 | ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8)); | 1505 | ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8)); |
1506 | else | 1506 | else |
1507 | ep_ctx->tx_info |= | 1507 | ep_ctx->tx_info |= |
@@ -1792,8 +1792,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
1792 | int size; | 1792 | int size; |
1793 | int i, j, num_ports; | 1793 | int i, j, num_ports; |
1794 | 1794 | ||
1795 | if (timer_pending(&xhci->cmd_timer)) | 1795 | del_timer_sync(&xhci->cmd_timer); |
1796 | del_timer_sync(&xhci->cmd_timer); | ||
1797 | 1796 | ||
1798 | /* Free the Event Ring Segment Table and the actual Event Ring */ | 1797 | /* Free the Event Ring Segment Table and the actual Event Ring */ |
1799 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); | 1798 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); |
@@ -2321,6 +2320,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2321 | 2320 | ||
2322 | INIT_LIST_HEAD(&xhci->cmd_list); | 2321 | INIT_LIST_HEAD(&xhci->cmd_list); |
2323 | 2322 | ||
2323 | /* init command timeout timer */ | ||
2324 | setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout, | ||
2325 | (unsigned long)xhci); | ||
2326 | |||
2324 | page_size = readl(&xhci->op_regs->page_size); | 2327 | page_size = readl(&xhci->op_regs->page_size); |
2325 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 2328 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
2326 | "Supported page size register = 0x%x", page_size); | 2329 | "Supported page size register = 0x%x", page_size); |
@@ -2505,10 +2508,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2505 | "Wrote ERST address to ir_set 0."); | 2508 | "Wrote ERST address to ir_set 0."); |
2506 | xhci_print_ir_set(xhci, 0); | 2509 | xhci_print_ir_set(xhci, 0); |
2507 | 2510 | ||
2508 | /* init command timeout timer */ | ||
2509 | setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout, | ||
2510 | (unsigned long)xhci); | ||
2511 | |||
2512 | /* | 2511 | /* |
2513 | * XXX: Might need to set the Interrupter Moderation Register to | 2512 | * XXX: Might need to set the Interrupter Moderation Register to |
2514 | * something other than the default (~1ms minimum between interrupts). | 2513 | * something other than the default (~1ms minimum between interrupts). |
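The "== 0x100" to ">= 0x100" changes in this file and in xhci-ring.c below hinge on how HCIVERSION encodes releases; a quick sanity check of the comparison (version values per the xHCI spec, the helper itself is illustrative):

#include <stdbool.h>

static bool needs_xhci_1_0_rules(unsigned int hci_version)
{
	/* 0x096 = xHCI 0.96, 0x100 = 1.0, 0x110 = 1.1 */
	return hci_version >= 0x100;
}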
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 5590eac2b22d..c79d33676672 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -180,51 +180,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
180 | "QUIRK: Resetting on resume"); | 180 | "QUIRK: Resetting on resume"); |
181 | } | 181 | } |
182 | 182 | ||
183 | /* | ||
184 | * In some Intel xHCI controllers, in order to get D3 working, | ||
185 | * through a vendor specific SSIC CONFIG register at offset 0x883c, | ||
186 | * SSIC PORT need to be marked as "unused" before putting xHCI | ||
187 | * into D3. After D3 exit, the SSIC port need to be marked as "used". | ||
188 | * Without this change, xHCI might not enter D3 state. | ||
189 | * Make sure PME works on some Intel xHCI controllers by writing 1 to clear | ||
190 | * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 | ||
191 | */ | ||
192 | static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) | ||
193 | { | ||
194 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
195 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); | ||
196 | u32 val; | ||
197 | void __iomem *reg; | ||
198 | |||
199 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | ||
200 | pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { | ||
201 | |||
202 | reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; | ||
203 | |||
204 | /* Notify SSIC that SSIC profile programming is not done */ | ||
205 | val = readl(reg) & ~PROG_DONE; | ||
206 | writel(val, reg); | ||
207 | |||
208 | /* Mark SSIC port as unused(suspend) or used(resume) */ | ||
209 | val = readl(reg); | ||
210 | if (suspend) | ||
211 | val |= SSIC_PORT_UNUSED; | ||
212 | else | ||
213 | val &= ~SSIC_PORT_UNUSED; | ||
214 | writel(val, reg); | ||
215 | |||
216 | /* Notify SSIC that SSIC profile programming is done */ | ||
217 | val = readl(reg) | PROG_DONE; | ||
218 | writel(val, reg); | ||
219 | readl(reg); | ||
220 | } | ||
221 | |||
222 | reg = (void __iomem *) xhci->cap_regs + 0x80a4; | ||
223 | val = readl(reg); | ||
224 | writel(val | BIT(28), reg); | ||
225 | readl(reg); | ||
226 | } | ||
227 | |||
228 | #ifdef CONFIG_ACPI | 183 | #ifdef CONFIG_ACPI |
229 | static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) | 184 | static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) |
230 | { | 185 | { |
@@ -345,6 +300,51 @@ static void xhci_pci_remove(struct pci_dev *dev) | |||
345 | } | 300 | } |
346 | 301 | ||
347 | #ifdef CONFIG_PM | 302 | #ifdef CONFIG_PM |
303 | /* | ||
304 | * In some Intel xHCI controllers, in order to get D3 working, | ||
305 | * through a vendor specific SSIC CONFIG register at offset 0x883c, | ||
306 | * SSIC PORT need to be marked as "unused" before putting xHCI | ||
307 | * into D3. After D3 exit, the SSIC port need to be marked as "used". | ||
308 | * Without this change, xHCI might not enter D3 state. | ||
309 | * Make sure PME works on some Intel xHCI controllers by writing 1 to clear | ||
310 | * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 | ||
311 | */ | ||
312 | static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) | ||
313 | { | ||
314 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
315 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); | ||
316 | u32 val; | ||
317 | void __iomem *reg; | ||
318 | |||
319 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | ||
320 | pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { | ||
321 | |||
322 | reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; | ||
323 | |||
324 | /* Notify SSIC that SSIC profile programming is not done */ | ||
325 | val = readl(reg) & ~PROG_DONE; | ||
326 | writel(val, reg); | ||
327 | |||
328 | /* Mark SSIC port as unused(suspend) or used(resume) */ | ||
329 | val = readl(reg); | ||
330 | if (suspend) | ||
331 | val |= SSIC_PORT_UNUSED; | ||
332 | else | ||
333 | val &= ~SSIC_PORT_UNUSED; | ||
334 | writel(val, reg); | ||
335 | |||
336 | /* Notify SSIC that SSIC profile programming is done */ | ||
337 | val = readl(reg) | PROG_DONE; | ||
338 | writel(val, reg); | ||
339 | readl(reg); | ||
340 | } | ||
341 | |||
342 | reg = (void __iomem *) xhci->cap_regs + 0x80a4; | ||
343 | val = readl(reg); | ||
344 | writel(val | BIT(28), reg); | ||
345 | readl(reg); | ||
346 | } | ||
347 | |||
348 | static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) | 348 | static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) |
349 | { | 349 | { |
350 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 350 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index a47a1e897086..43291f93afeb 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -302,6 +302,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) | |||
302 | ret = xhci_handshake(&xhci->op_regs->cmd_ring, | 302 | ret = xhci_handshake(&xhci->op_regs->cmd_ring, |
303 | CMD_RING_RUNNING, 0, 5 * 1000 * 1000); | 303 | CMD_RING_RUNNING, 0, 5 * 1000 * 1000); |
304 | if (ret < 0) { | 304 | if (ret < 0) { |
305 | /* we are about to kill xhci, give it one more chance */ | ||
306 | xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, | ||
307 | &xhci->op_regs->cmd_ring); | ||
308 | udelay(1000); | ||
309 | ret = xhci_handshake(&xhci->op_regs->cmd_ring, | ||
310 | CMD_RING_RUNNING, 0, 3 * 1000 * 1000); | ||
311 | if (ret == 0) | ||
312 | return 0; | ||
313 | |||
305 | xhci_err(xhci, "Stopped the command ring failed, " | 314 | xhci_err(xhci, "Stopped the command ring failed, " |
306 | "maybe the host is dead\n"); | 315 | "maybe the host is dead\n"); |
307 | xhci->xhc_state |= XHCI_STATE_DYING; | 316 | xhci->xhc_state |= XHCI_STATE_DYING; |
@@ -3461,8 +3470,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3461 | if (start_cycle == 0) | 3470 | if (start_cycle == 0) |
3462 | field |= 0x1; | 3471 | field |= 0x1; |
3463 | 3472 | ||
3464 | /* xHCI 1.0 6.4.1.2.1: Transfer Type field */ | 3473 | /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ |
3465 | if (xhci->hci_version == 0x100) { | 3474 | if (xhci->hci_version >= 0x100) { |
3466 | if (urb->transfer_buffer_length > 0) { | 3475 | if (urb->transfer_buffer_length > 0) { |
3467 | if (setup->bRequestType & USB_DIR_IN) | 3476 | if (setup->bRequestType & USB_DIR_IN) |
3468 | field |= TRB_TX_TYPE(TRB_DATA_IN); | 3477 | field |= TRB_TX_TYPE(TRB_DATA_IN); |
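The abort path above now rewrites CMD_RING_ABORT and waits once more before declaring the host dead. The control flow, reduced to its bare shape (the callback name is made up):

static int stop_with_retry(int (*try_stop)(unsigned int timeout_us))
{
	int ret;

	ret = try_stop(5 * 1000 * 1000);
	if (ret < 0)
		ret = try_stop(3 * 1000 * 1000);	/* one more, shorter attempt */
	return ret;
}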
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6b0f4a47e402..9957bd96d4bc 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci) | |||
146 | "waited %u microseconds.\n", | 146 | "waited %u microseconds.\n", |
147 | XHCI_MAX_HALT_USEC); | 147 | XHCI_MAX_HALT_USEC); |
148 | if (!ret) | 148 | if (!ret) |
149 | xhci->xhc_state &= ~XHCI_STATE_HALTED; | 149 | xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING); |
150 | |||
150 | return ret; | 151 | return ret; |
151 | } | 152 | } |
152 | 153 | ||
@@ -654,15 +655,6 @@ int xhci_run(struct usb_hcd *hcd) | |||
654 | } | 655 | } |
655 | EXPORT_SYMBOL_GPL(xhci_run); | 656 | EXPORT_SYMBOL_GPL(xhci_run); |
656 | 657 | ||
657 | static void xhci_only_stop_hcd(struct usb_hcd *hcd) | ||
658 | { | ||
659 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
660 | |||
661 | spin_lock_irq(&xhci->lock); | ||
662 | xhci_halt(xhci); | ||
663 | spin_unlock_irq(&xhci->lock); | ||
664 | } | ||
665 | |||
666 | /* | 658 | /* |
667 | * Stop xHCI driver. | 659 | * Stop xHCI driver. |
668 | * | 660 | * |
@@ -677,12 +669,14 @@ void xhci_stop(struct usb_hcd *hcd) | |||
677 | u32 temp; | 669 | u32 temp; |
678 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 670 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
679 | 671 | ||
680 | if (!usb_hcd_is_primary_hcd(hcd)) { | 672 | if (xhci->xhc_state & XHCI_STATE_HALTED) |
681 | xhci_only_stop_hcd(xhci->shared_hcd); | ||
682 | return; | 673 | return; |
683 | } | ||
684 | 674 | ||
675 | mutex_lock(&xhci->mutex); | ||
685 | spin_lock_irq(&xhci->lock); | 676 | spin_lock_irq(&xhci->lock); |
677 | xhci->xhc_state |= XHCI_STATE_HALTED; | ||
678 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; | ||
679 | |||
686 | /* Make sure the xHC is halted for a USB3 roothub | 680 | /* Make sure the xHC is halted for a USB3 roothub |
687 | * (xhci_stop() could be called as part of failed init). | 681 | * (xhci_stop() could be called as part of failed init). |
688 | */ | 682 | */ |
@@ -717,6 +711,7 @@ void xhci_stop(struct usb_hcd *hcd) | |||
717 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 711 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
718 | "xhci_stop completed - status = %x", | 712 | "xhci_stop completed - status = %x", |
719 | readl(&xhci->op_regs->status)); | 713 | readl(&xhci->op_regs->status)); |
714 | mutex_unlock(&xhci->mutex); | ||
720 | } | 715 | } |
721 | 716 | ||
722 | /* | 717 | /* |
@@ -3793,6 +3788,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, | |||
3793 | 3788 | ||
3794 | mutex_lock(&xhci->mutex); | 3789 | mutex_lock(&xhci->mutex); |
3795 | 3790 | ||
3791 | if (xhci->xhc_state) /* dying or halted */ | ||
3792 | goto out; | ||
3793 | |||
3796 | if (!udev->slot_id) { | 3794 | if (!udev->slot_id) { |
3797 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | 3795 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
3798 | "Bad Slot ID %d", udev->slot_id); | 3796 | "Bad Slot ID %d", udev->slot_id); |
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index 3ad5d19e4d04..23c794813e6a 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c | |||
@@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data, | |||
472 | if (this_time > max) | 472 | if (this_time > max) |
473 | this_time = max; | 473 | this_time = max; |
474 | 474 | ||
475 | memcpy(data, dev->buf, this_time); | 475 | memcpy(data, dev->buf + dev->used, this_time); |
476 | 476 | ||
477 | dev->used += this_time; | 477 | dev->used += this_time; |
478 | 478 | ||
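The chaoskey fix restores the read cursor: each copy must start at buf + used, otherwise callers keep receiving the first bytes of the buffer again. A small illustrative analogue (struct and names invented):

#include <stddef.h>
#include <string.h>

struct drain { unsigned char buf[64]; size_t have; size_t used; };

static size_t drain_read(struct drain *d, void *out, size_t max)
{
	size_t n = d->have - d->used;

	if (n > max)
		n = max;
	memcpy(out, d->buf + d->used, n);	/* start at the consumed offset */
	d->used += n;
	return n;
}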
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 514a6cdaeff6..4a518ff12310 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1051,6 +1051,7 @@ void musb_start(struct musb *musb) | |||
1051 | * (c) peripheral initiates, using SRP | 1051 | * (c) peripheral initiates, using SRP |
1052 | */ | 1052 | */ |
1053 | if (musb->port_mode != MUSB_PORT_MODE_HOST && | 1053 | if (musb->port_mode != MUSB_PORT_MODE_HOST && |
1054 | musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON && | ||
1054 | (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) { | 1055 | (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) { |
1055 | musb->is_active = 1; | 1056 | musb->is_active = 1; |
1056 | } else { | 1057 | } else { |
@@ -2448,6 +2449,9 @@ static int musb_suspend(struct device *dev) | |||
2448 | struct musb *musb = dev_to_musb(dev); | 2449 | struct musb *musb = dev_to_musb(dev); |
2449 | unsigned long flags; | 2450 | unsigned long flags; |
2450 | 2451 | ||
2452 | musb_platform_disable(musb); | ||
2453 | musb_generic_disable(musb); | ||
2454 | |||
2451 | spin_lock_irqsave(&musb->lock, flags); | 2455 | spin_lock_irqsave(&musb->lock, flags); |
2452 | 2456 | ||
2453 | if (is_peripheral_active(musb)) { | 2457 | if (is_peripheral_active(musb)) { |
@@ -2501,6 +2505,9 @@ static int musb_resume(struct device *dev) | |||
2501 | pm_runtime_disable(dev); | 2505 | pm_runtime_disable(dev); |
2502 | pm_runtime_set_active(dev); | 2506 | pm_runtime_set_active(dev); |
2503 | pm_runtime_enable(dev); | 2507 | pm_runtime_enable(dev); |
2508 | |||
2509 | musb_start(musb); | ||
2510 | |||
2504 | return 0; | 2511 | return 0; |
2505 | } | 2512 | } |
2506 | 2513 | ||
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index d07cafb7d5f5..e499b862a946 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c | |||
@@ -551,6 +551,9 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel) | |||
551 | } else { | 551 | } else { |
552 | cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE); | 552 | cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE); |
553 | 553 | ||
554 | /* delay to drain to cppi dma pipeline for isoch */ | ||
555 | udelay(250); | ||
556 | |||
554 | csr = musb_readw(epio, MUSB_RXCSR); | 557 | csr = musb_readw(epio, MUSB_RXCSR); |
555 | csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB); | 558 | csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB); |
556 | musb_writew(epio, MUSB_RXCSR, csr); | 559 | musb_writew(epio, MUSB_RXCSR, csr); |
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index a0cfead6150f..84512d1d5eee 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c | |||
@@ -225,8 +225,11 @@ static void dsps_musb_enable(struct musb *musb) | |||
225 | 225 | ||
226 | dsps_writel(reg_base, wrp->epintr_set, epmask); | 226 | dsps_writel(reg_base, wrp->epintr_set, epmask); |
227 | dsps_writel(reg_base, wrp->coreintr_set, coremask); | 227 | dsps_writel(reg_base, wrp->coreintr_set, coremask); |
228 | /* start polling for ID change. */ | 228 | /* start polling for ID change in dual-role idle mode */ |
229 | mod_timer(&glue->timer, jiffies + msecs_to_jiffies(wrp->poll_timeout)); | 229 | if (musb->xceiv->otg->state == OTG_STATE_B_IDLE && |
230 | musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) | ||
231 | mod_timer(&glue->timer, jiffies + | ||
232 | msecs_to_jiffies(wrp->poll_timeout)); | ||
230 | dsps_musb_try_idle(musb, 0); | 233 | dsps_musb_try_idle(musb, 0); |
231 | } | 234 | } |
232 | 235 | ||
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index 39168fe9b406..b2685e75a683 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c | |||
@@ -379,6 +379,8 @@ static const struct of_device_id ux500_match[] = { | |||
379 | {} | 379 | {} |
380 | }; | 380 | }; |
381 | 381 | ||
382 | MODULE_DEVICE_TABLE(of, ux500_match); | ||
383 | |||
382 | static struct platform_driver ux500_driver = { | 384 | static struct platform_driver ux500_driver = { |
383 | .probe = ux500_probe, | 385 | .probe = ux500_probe, |
384 | .remove = ux500_remove, | 386 | .remove = ux500_remove, |
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 7d3beee2a587..173132416170 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig | |||
@@ -155,7 +155,7 @@ config USB_MSM_OTG | |||
155 | config USB_QCOM_8X16_PHY | 155 | config USB_QCOM_8X16_PHY |
156 | tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support" | 156 | tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support" |
157 | depends on ARCH_QCOM || COMPILE_TEST | 157 | depends on ARCH_QCOM || COMPILE_TEST |
158 | depends on RESET_CONTROLLER | 158 | depends on RESET_CONTROLLER && EXTCON |
159 | select USB_PHY | 159 | select USB_PHY |
160 | select USB_ULPI_VIEWPORT | 160 | select USB_ULPI_VIEWPORT |
161 | help | 161 | help |
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index ec6ecd03269c..5320cb8642cb 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c | |||
@@ -232,7 +232,8 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop, | |||
232 | clk_rate = pdata->clk_rate; | 232 | clk_rate = pdata->clk_rate; |
233 | needs_vcc = pdata->needs_vcc; | 233 | needs_vcc = pdata->needs_vcc; |
234 | if (gpio_is_valid(pdata->gpio_reset)) { | 234 | if (gpio_is_valid(pdata->gpio_reset)) { |
235 | err = devm_gpio_request_one(dev, pdata->gpio_reset, 0, | 235 | err = devm_gpio_request_one(dev, pdata->gpio_reset, |
236 | GPIOF_ACTIVE_LOW, | ||
236 | dev_name(dev)); | 237 | dev_name(dev)); |
237 | if (!err) | 238 | if (!err) |
238 | nop->gpiod_reset = | 239 | nop->gpiod_reset = |
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c index 8a55b37d1a02..db68156568e6 100644 --- a/drivers/usb/phy/phy-isp1301.c +++ b/drivers/usb/phy/phy-isp1301.c | |||
@@ -31,6 +31,7 @@ static const struct i2c_device_id isp1301_id[] = { | |||
31 | { "isp1301", 0 }, | 31 | { "isp1301", 0 }, |
32 | { } | 32 | { } |
33 | }; | 33 | }; |
34 | MODULE_DEVICE_TABLE(i2c, isp1301_id); | ||
34 | 35 | ||
35 | static struct i2c_client *isp1301_i2c_client; | 36 | static struct i2c_client *isp1301_i2c_client; |
36 | 37 | ||
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 7b98e1d9194c..d82fa36c3465 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c | |||
@@ -476,6 +476,11 @@ static const struct of_device_id usbhs_of_match[] = { | |||
476 | .compatible = "renesas,usbhs-r8a7794", | 476 | .compatible = "renesas,usbhs-r8a7794", |
477 | .data = (void *)USBHS_TYPE_RCAR_GEN2, | 477 | .data = (void *)USBHS_TYPE_RCAR_GEN2, |
478 | }, | 478 | }, |
479 | { | ||
480 | /* Gen3 is compatible with Gen2 */ | ||
481 | .compatible = "renesas,usbhs-r8a7795", | ||
482 | .data = (void *)USBHS_TYPE_RCAR_GEN2, | ||
483 | }, | ||
479 | { }, | 484 | { }, |
480 | }; | 485 | }; |
481 | MODULE_DEVICE_TABLE(of, usbhs_of_match); | 486 | MODULE_DEVICE_TABLE(of, usbhs_of_match); |
@@ -493,7 +498,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev) | |||
493 | return NULL; | 498 | return NULL; |
494 | 499 | ||
495 | dparam = &info->driver_param; | 500 | dparam = &info->driver_param; |
496 | dparam->type = of_id ? (u32)of_id->data : 0; | 501 | dparam->type = of_id ? (uintptr_t)of_id->data : 0; |
497 | if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp)) | 502 | if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp)) |
498 | dparam->buswait_bwait = tmp; | 503 | dparam->buswait_bwait = tmp; |
499 | gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0, | 504 | gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0, |
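On the (uintptr_t) cast above: on 64-bit builds, converting a pointer straight to a 32-bit integer is a truncating conversion the compiler warns about; going through uintptr_t keeps the conversion well defined. A minimal sketch (helper name invented):

#include <stdint.h>

static uint32_t data_to_type(const void *data)
{
	return (uint32_t)(uintptr_t)data;	/* pointer -> uintptr_t -> u32 */
}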
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 6d1941a2396a..6956c4f62216 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -278,6 +278,10 @@ static void option_instat_callback(struct urb *urb); | |||
278 | #define ZTE_PRODUCT_MF622 0x0001 | 278 | #define ZTE_PRODUCT_MF622 0x0001 |
279 | #define ZTE_PRODUCT_MF628 0x0015 | 279 | #define ZTE_PRODUCT_MF628 0x0015 |
280 | #define ZTE_PRODUCT_MF626 0x0031 | 280 | #define ZTE_PRODUCT_MF626 0x0031 |
281 | #define ZTE_PRODUCT_ZM8620_X 0x0396 | ||
282 | #define ZTE_PRODUCT_ME3620_MBIM 0x0426 | ||
283 | #define ZTE_PRODUCT_ME3620_X 0x1432 | ||
284 | #define ZTE_PRODUCT_ME3620_L 0x1433 | ||
281 | #define ZTE_PRODUCT_AC2726 0xfff1 | 285 | #define ZTE_PRODUCT_AC2726 0xfff1 |
282 | #define ZTE_PRODUCT_MG880 0xfffd | 286 | #define ZTE_PRODUCT_MG880 0xfffd |
283 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe | 287 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe |
@@ -544,6 +548,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = { | |||
544 | .sendsetup = BIT(1) | BIT(2) | BIT(3), | 548 | .sendsetup = BIT(1) | BIT(2) | BIT(3), |
545 | }; | 549 | }; |
546 | 550 | ||
551 | static const struct option_blacklist_info zte_me3620_mbim_blacklist = { | ||
552 | .reserved = BIT(2) | BIT(3) | BIT(4), | ||
553 | }; | ||
554 | |||
555 | static const struct option_blacklist_info zte_me3620_xl_blacklist = { | ||
556 | .reserved = BIT(3) | BIT(4) | BIT(5), | ||
557 | }; | ||
558 | |||
559 | static const struct option_blacklist_info zte_zm8620_x_blacklist = { | ||
560 | .reserved = BIT(3) | BIT(4) | BIT(5), | ||
561 | }; | ||
562 | |||
547 | static const struct option_blacklist_info huawei_cdc12_blacklist = { | 563 | static const struct option_blacklist_info huawei_cdc12_blacklist = { |
548 | .reserved = BIT(1) | BIT(2), | 564 | .reserved = BIT(1) | BIT(2), |
549 | }; | 565 | }; |
@@ -1591,6 +1607,14 @@ static const struct usb_device_id option_ids[] = { | |||
1591 | .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, | 1607 | .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, |
1592 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), | 1608 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), |
1593 | .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, | 1609 | .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, |
1610 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L), | ||
1611 | .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist }, | ||
1612 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM), | ||
1613 | .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist }, | ||
1614 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X), | ||
1615 | .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist }, | ||
1616 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X), | ||
1617 | .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist }, | ||
1594 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, | 1618 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, |
1595 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, | 1619 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, |
1596 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, | 1620 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, |
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 6c3734d2b45a..d3ea90bef84d 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c | |||
@@ -80,6 +80,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial, | |||
80 | static int whiteheat_firmware_attach(struct usb_serial *serial); | 80 | static int whiteheat_firmware_attach(struct usb_serial *serial); |
81 | 81 | ||
82 | /* function prototypes for the Connect Tech WhiteHEAT serial converter */ | 82 | /* function prototypes for the Connect Tech WhiteHEAT serial converter */ |
83 | static int whiteheat_probe(struct usb_serial *serial, | ||
84 | const struct usb_device_id *id); | ||
83 | static int whiteheat_attach(struct usb_serial *serial); | 85 | static int whiteheat_attach(struct usb_serial *serial); |
84 | static void whiteheat_release(struct usb_serial *serial); | 86 | static void whiteheat_release(struct usb_serial *serial); |
85 | static int whiteheat_port_probe(struct usb_serial_port *port); | 87 | static int whiteheat_port_probe(struct usb_serial_port *port); |
@@ -116,6 +118,7 @@ static struct usb_serial_driver whiteheat_device = { | |||
116 | .description = "Connect Tech - WhiteHEAT", | 118 | .description = "Connect Tech - WhiteHEAT", |
117 | .id_table = id_table_std, | 119 | .id_table = id_table_std, |
118 | .num_ports = 4, | 120 | .num_ports = 4, |
121 | .probe = whiteheat_probe, | ||
119 | .attach = whiteheat_attach, | 122 | .attach = whiteheat_attach, |
120 | .release = whiteheat_release, | 123 | .release = whiteheat_release, |
121 | .port_probe = whiteheat_port_probe, | 124 | .port_probe = whiteheat_port_probe, |
@@ -217,6 +220,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial) | |||
217 | /***************************************************************************** | 220 | /***************************************************************************** |
218 | * Connect Tech's White Heat serial driver functions | 221 | * Connect Tech's White Heat serial driver functions |
219 | *****************************************************************************/ | 222 | *****************************************************************************/ |
223 | |||
224 | static int whiteheat_probe(struct usb_serial *serial, | ||
225 | const struct usb_device_id *id) | ||
226 | { | ||
227 | struct usb_host_interface *iface_desc; | ||
228 | struct usb_endpoint_descriptor *endpoint; | ||
229 | size_t num_bulk_in = 0; | ||
230 | size_t num_bulk_out = 0; | ||
231 | size_t min_num_bulk; | ||
232 | unsigned int i; | ||
233 | |||
234 | iface_desc = serial->interface->cur_altsetting; | ||
235 | |||
236 | for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { | ||
237 | endpoint = &iface_desc->endpoint[i].desc; | ||
238 | if (usb_endpoint_is_bulk_in(endpoint)) | ||
239 | ++num_bulk_in; | ||
240 | if (usb_endpoint_is_bulk_out(endpoint)) | ||
241 | ++num_bulk_out; | ||
242 | } | ||
243 | |||
244 | min_num_bulk = COMMAND_PORT + 1; | ||
245 | if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk) | ||
246 | return -ENODEV; | ||
247 | |||
248 | return 0; | ||
249 | } | ||
250 | |||
220 | static int whiteheat_attach(struct usb_serial *serial) | 251 | static int whiteheat_attach(struct usb_serial *serial) |
221 | { | 252 | { |
222 | struct usb_serial_port *command_port; | 253 | struct usb_serial_port *command_port; |
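whiteheat_probe() above counts the bulk endpoints the interface actually exposes and refuses to bind when there are fewer than the driver will later index, preventing an out-of-bounds port access. The shape of that check, with an invented descriptor layout (illustrative only; the driver returns -ENODEV):

#include <stddef.h>

struct desc { int is_bulk_in; int is_bulk_out; };

static int validate_endpoints(const struct desc *d, size_t n, size_t need)
{
	size_t in = 0, out = 0, i;

	for (i = 0; i < n; i++) {
		in  += d[i].is_bulk_in  ? 1 : 0;
		out += d[i].is_bulk_out ? 1 : 0;
	}
	return (in >= need && out >= need) ? 0 : -1;
}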
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 7d137a43cc86..9eda69e40678 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -61,8 +61,7 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;" | |||
61 | enum { | 61 | enum { |
62 | VHOST_NET_FEATURES = VHOST_FEATURES | | 62 | VHOST_NET_FEATURES = VHOST_FEATURES | |
63 | (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | | 63 | (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | |
64 | (1ULL << VIRTIO_NET_F_MRG_RXBUF) | | 64 | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
65 | (1ULL << VIRTIO_F_VERSION_1), | ||
66 | }; | 65 | }; |
67 | 66 | ||
68 | enum { | 67 | enum { |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index f114a9dbb48f..e25a23692822 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -166,9 +166,7 @@ enum { | |||
166 | /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ | 166 | /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ |
167 | enum { | 167 | enum { |
168 | VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | | 168 | VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | |
169 | (1ULL << VIRTIO_SCSI_F_T10_PI) | | 169 | (1ULL << VIRTIO_SCSI_F_T10_PI) |
170 | (1ULL << VIRTIO_F_ANY_LAYOUT) | | ||
171 | (1ULL << VIRTIO_F_VERSION_1) | ||
172 | }; | 170 | }; |
173 | 171 | ||
174 | #define VHOST_SCSI_MAX_TARGET 256 | 172 | #define VHOST_SCSI_MAX_TARGET 256 |
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index d9c501eaa6c3..f2882ac98726 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c | |||
@@ -277,10 +277,13 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl, | |||
277 | return -EFAULT; | 277 | return -EFAULT; |
278 | return 0; | 278 | return 0; |
279 | case VHOST_SET_FEATURES: | 279 | case VHOST_SET_FEATURES: |
280 | printk(KERN_ERR "1\n"); | ||
280 | if (copy_from_user(&features, featurep, sizeof features)) | 281 | if (copy_from_user(&features, featurep, sizeof features)) |
281 | return -EFAULT; | 282 | return -EFAULT; |
283 | printk(KERN_ERR "2\n"); | ||
282 | if (features & ~VHOST_FEATURES) | 284 | if (features & ~VHOST_FEATURES) |
283 | return -EOPNOTSUPP; | 285 | return -EOPNOTSUPP; |
286 | printk(KERN_ERR "3\n"); | ||
284 | return vhost_test_set_features(n, features); | 287 | return vhost_test_set_features(n, features); |
285 | case VHOST_RESET_OWNER: | 288 | case VHOST_RESET_OWNER: |
286 | return vhost_test_reset_owner(n); | 289 | return vhost_test_reset_owner(n); |
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index ce6f6da4b09f..4772862b71a7 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
@@ -173,7 +173,9 @@ enum { | |||
173 | VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | | 173 | VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | |
174 | (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | | 174 | (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | |
175 | (1ULL << VIRTIO_RING_F_EVENT_IDX) | | 175 | (1ULL << VIRTIO_RING_F_EVENT_IDX) | |
176 | (1ULL << VHOST_F_LOG_ALL), | 176 | (1ULL << VHOST_F_LOG_ALL) | |
177 | (1ULL << VIRTIO_F_ANY_LAYOUT) | | ||
178 | (1ULL << VIRTIO_F_VERSION_1) | ||
177 | }; | 179 | }; |
178 | 180 | ||
179 | static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit) | 181 | static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit) |
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index 0e5fde1d3ffb..9f9a7bef1ff6 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c | |||
@@ -752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev, | |||
752 | if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) { | 752 | if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) { |
753 | dev_err(dev, "Invalid waveform\n"); | 753 | dev_err(dev, "Invalid waveform\n"); |
754 | err = -EINVAL; | 754 | err = -EINVAL; |
755 | goto err_failed; | 755 | goto err_fw; |
756 | } | 756 | } |
757 | 757 | ||
758 | mutex_lock(&(par->io_lock)); | 758 | mutex_lock(&(par->io_lock)); |
@@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev, | |||
762 | mutex_unlock(&(par->io_lock)); | 762 | mutex_unlock(&(par->io_lock)); |
763 | if (err < 0) { | 763 | if (err < 0) { |
764 | dev_err(dev, "Failed to store broadsheet waveform\n"); | 764 | dev_err(dev, "Failed to store broadsheet waveform\n"); |
765 | goto err_failed; | 765 | goto err_fw; |
766 | } | 766 | } |
767 | 767 | ||
768 | dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size); | 768 | dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size); |
769 | 769 | ||
770 | return len; | 770 | err = len; |
771 | 771 | ||
772 | err_fw: | ||
773 | release_firmware(fw_entry); | ||
772 | err_failed: | 774 | err_failed: |
773 | return err; | 775 | return err; |
774 | } | 776 | } |
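The broadsheetfb hunk plugs a firmware leak: the blob obtained earlier is now released on the error paths and on success alike, with the byte count returned through err. Reduced to its shape, using a stand-in allocation instead of request_firmware():

#include <stdlib.h>

static long store_waveform_sketch(long len)
{
	long err;
	void *blob = malloc(1024);	/* stands in for request_firmware() */

	if (!blob)
		return -1;
	if (len < 8 * 1024 || len > 64 * 1024) {
		err = -1;
		goto out;
	}
	err = len;			/* success also falls through to release */
out:
	free(blob);			/* released on every exit */
	return err;
}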
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index 7fa2e6f9e322..b335c1ae8625 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c | |||
@@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state) | |||
1628 | static int fsl_diu_resume(struct platform_device *ofdev) | 1628 | static int fsl_diu_resume(struct platform_device *ofdev) |
1629 | { | 1629 | { |
1630 | struct fsl_diu_data *data; | 1630 | struct fsl_diu_data *data; |
1631 | unsigned int i; | ||
1631 | 1632 | ||
1632 | data = dev_get_drvdata(&ofdev->dev); | 1633 | data = dev_get_drvdata(&ofdev->dev); |
1633 | enable_lcdc(data->fsl_diu_info); | 1634 | |
1635 | fsl_diu_enable_interrupts(data); | ||
1636 | update_lcdc(data->fsl_diu_info); | ||
1637 | for (i = 0; i < NUM_AOIS; i++) { | ||
1638 | if (data->mfb[i].count) | ||
1639 | fsl_diu_enable_panel(&data->fsl_diu_info[i]); | ||
1640 | } | ||
1634 | 1641 | ||
1635 | return 0; | 1642 | return 0; |
1636 | } | 1643 | } |
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c index 9b8bebdf8f86..f9ec5c0484fa 100644 --- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c | |||
@@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = { | |||
831 | { .compatible = "fujitsu,coral", }, | 831 | { .compatible = "fujitsu,coral", }, |
832 | { /* end */ } | 832 | { /* end */ } |
833 | }; | 833 | }; |
834 | MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl); | ||
834 | 835 | ||
835 | static struct platform_driver of_platform_mb862xxfb_driver = { | 836 | static struct platform_driver of_platform_mb862xxfb_driver = { |
836 | .driver = { | 837 | .driver = { |
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c index a8ce920fa797..d811e6dcaef7 100644 --- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c +++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c | |||
@@ -294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev) | |||
294 | 294 | ||
295 | adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); | 295 | adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); |
296 | if (adapter_node) { | 296 | if (adapter_node) { |
297 | adapter = of_find_i2c_adapter_by_node(adapter_node); | 297 | adapter = of_get_i2c_adapter_by_node(adapter_node); |
298 | if (adapter == NULL) { | 298 | if (adapter == NULL) { |
299 | dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); | 299 | dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); |
300 | omap_dss_put_device(ddata->in); | 300 | omap_dss_put_device(ddata->in); |
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c index 90cbc4c3406c..c581231c74a5 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c | |||
@@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = { | |||
898 | { .compatible = "omapdss,sony,acx565akm", }, | 898 | { .compatible = "omapdss,sony,acx565akm", }, |
899 | {}, | 899 | {}, |
900 | }; | 900 | }; |
901 | MODULE_DEVICE_TABLE(of, acx565akm_of_match); | ||
901 | 902 | ||
902 | static struct spi_driver acx565akm_driver = { | 903 | static struct spi_driver acx565akm_driver = { |
903 | .driver = { | 904 | .driver = { |
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c index 7ed9a227f5ea..01b43e9ce941 100644 --- a/drivers/video/fbdev/tridentfb.c +++ b/drivers/video/fbdev/tridentfb.c | |||
@@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data, | |||
226 | writemmr(par, DST1, point(x, y)); | 226 | writemmr(par, DST1, point(x, y)); |
227 | writemmr(par, DST2, point(x + w - 1, y + h - 1)); | 227 | writemmr(par, DST2, point(x + w - 1, y + h - 1)); |
228 | 228 | ||
229 | memcpy(par->io_virt + 0x10000, data, 4 * size); | 229 | iowrite32_rep(par->io_virt + 0x10000, data, size); |
230 | } | 230 | } |
231 | 231 | ||
232 | static void blade_copy_rect(struct tridentfb_par *par, | 232 | static void blade_copy_rect(struct tridentfb_par *par, |
@@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par) | |||
673 | static inline void set_lwidth(struct tridentfb_par *par, int width) | 673 | static inline void set_lwidth(struct tridentfb_par *par, int width) |
674 | { | 674 | { |
675 | write3X4(par, VGA_CRTC_OFFSET, width & 0xFF); | 675 | write3X4(par, VGA_CRTC_OFFSET, width & 0xFF); |
676 | write3X4(par, AddColReg, | 676 | /* chips older than TGUI9660 have only 1 width bit in AddColReg */ |
677 | (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); | 677 | /* touching the other one breaks I2C/DDC */ |
678 | if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320) | ||
679 | write3X4(par, AddColReg, | ||
680 | (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4)); | ||
681 | else | ||
682 | write3X4(par, AddColReg, | ||
683 | (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); | ||
678 | } | 684 | } |
679 | 685 | ||
680 | /* For resolutions smaller than FP resolution stretch */ | 686 | /* For resolutions smaller than FP resolution stretch */ |
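Two things change in tridentfb: image data now reaches the aperture via iowrite32_rep() instead of a plain memcpy() to __iomem, and the width-bit write avoids touching the bit that breaks I2C/DDC on older chips. On the first point, a quick unit check that the new count ("size" 32-bit words) moves the same data as the old one ("4 * size" bytes):

#include <assert.h>
#include <stdint.h>

static void unit_check(unsigned long size)
{
	unsigned long bytes_via_memcpy = 4 * size;
	unsigned long bytes_via_iowrite32_rep = size * sizeof(uint32_t);

	assert(bytes_via_memcpy == bytes_via_iowrite32_rep);
}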
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c index 32d8275e4c88..8a1076beecd3 100644 --- a/drivers/video/of_display_timing.c +++ b/drivers/video/of_display_timing.c | |||
@@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np) | |||
210 | */ | 210 | */ |
211 | pr_err("%s: error in timing %d\n", | 211 | pr_err("%s: error in timing %d\n", |
212 | of_node_full_name(np), disp->num_timings + 1); | 212 | of_node_full_name(np), disp->num_timings + 1); |
213 | kfree(dt); | ||
213 | goto timingfail; | 214 | goto timingfail; |
214 | } | 215 | } |
215 | 216 | ||
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index c68edc16aa54..79e1aa1b0959 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -817,8 +817,9 @@ config ITCO_WDT | |||
817 | tristate "Intel TCO Timer/Watchdog" | 817 | tristate "Intel TCO Timer/Watchdog" |
818 | depends on (X86 || IA64) && PCI | 818 | depends on (X86 || IA64) && PCI |
819 | select WATCHDOG_CORE | 819 | select WATCHDOG_CORE |
820 | depends on I2C || I2C=n | ||
820 | select LPC_ICH if !EXPERT | 821 | select LPC_ICH if !EXPERT |
821 | select I2C_I801 if !EXPERT | 822 | select I2C_I801 if !EXPERT && I2C |
822 | ---help--- | 823 | ---help--- |
823 | Hardware driver for the intel TCO timer based watchdog devices. | 824 | Hardware driver for the intel TCO timer based watchdog devices. |
824 | These drivers are included in the Intel 82801 I/O Controller | 825 | These drivers are included in the Intel 82801 I/O Controller |
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index 66c3e656a616..8a5ce5b5a0b6 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c | |||
@@ -36,6 +36,13 @@ | |||
36 | #define PM_RSTC_WRCFG_FULL_RESET 0x00000020 | 36 | #define PM_RSTC_WRCFG_FULL_RESET 0x00000020 |
37 | #define PM_RSTC_RESET 0x00000102 | 37 | #define PM_RSTC_RESET 0x00000102 |
38 | 38 | ||
39 | /* | ||
40 | * The Raspberry Pi firmware uses the RSTS register to know which partition | ||
41 | * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10. | ||
42 | * Partition 63 is a special partition used by the firmware to indicate halt. | ||
43 | */ | ||
44 | #define PM_RSTS_RASPBERRYPI_HALT 0x555 | ||
45 | |||
39 | #define SECS_TO_WDOG_TICKS(x) ((x) << 16) | 46 | #define SECS_TO_WDOG_TICKS(x) ((x) << 16) |
40 | #define WDOG_TICKS_TO_SECS(x) ((x) >> 16) | 47 | #define WDOG_TICKS_TO_SECS(x) ((x) >> 16) |
41 | 48 | ||
@@ -151,8 +158,7 @@ static void bcm2835_power_off(void) | |||
151 | * hard reset. | 158 | * hard reset. |
152 | */ | 159 | */ |
153 | val = readl_relaxed(wdt->base + PM_RSTS); | 160 | val = readl_relaxed(wdt->base + PM_RSTS); |
154 | val &= PM_RSTC_WRCFG_CLR; | 161 | val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT; |
155 | val |= PM_PASSWORD | PM_RSTS_HADWRH_SET; | ||
156 | writel_relaxed(val, wdt->base + PM_RSTS); | 162 | writel_relaxed(val, wdt->base + PM_RSTS); |
157 | 163 | ||
158 | /* Continue with normal reset mechanism */ | 164 | /* Continue with normal reset mechanism */ |
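The new PM_RSTS_RASPBERRYPI_HALT value follows the encoding described in the added comment: the 6-bit partition number is spread across the even bit positions 0, 2, 4, 6, 8, 10, so partition 63 becomes 0x555. A small check of that arithmetic (helper name invented):

#include <stdint.h>

static uint32_t spread_partition(uint32_t part)
{
	uint32_t val = 0;
	int i;

	for (i = 0; i < 6; i++)
		val |= ((part >> i) & 1u) << (2 * i);
	return val;
}

/* spread_partition(63) == 0x555, the halt marker written above. */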
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index cc1bdfc2ff71..006e2348022c 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c | |||
@@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = { | |||
303 | }, | 303 | }, |
304 | {}, | 304 | {}, |
305 | }; | 305 | }; |
306 | MODULE_DEVICE_TABLE(of, gef_wdt_ids); | ||
306 | 307 | ||
307 | static struct platform_driver gef_wdt_driver = { | 308 | static struct platform_driver gef_wdt_driver = { |
308 | .driver = { | 309 | .driver = { |
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c index 69013007dc47..098fa9c34d6d 100644 --- a/drivers/watchdog/mena21_wdt.c +++ b/drivers/watchdog/mena21_wdt.c | |||
@@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = { | |||
253 | { .compatible = "men,a021-wdt" }, | 253 | { .compatible = "men,a021-wdt" }, |
254 | { }, | 254 | { }, |
255 | }; | 255 | }; |
256 | MODULE_DEVICE_TABLE(of, a21_wdt_ids); | ||
256 | 257 | ||
257 | static struct platform_driver a21_wdt_driver = { | 258 | static struct platform_driver a21_wdt_driver = { |
258 | .probe = a21_wdt_probe, | 259 | .probe = a21_wdt_probe, |
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c index 2789da2c0515..60b0605bd7e6 100644 --- a/drivers/watchdog/moxart_wdt.c +++ b/drivers/watchdog/moxart_wdt.c | |||
@@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = { | |||
168 | { .compatible = "moxa,moxart-watchdog" }, | 168 | { .compatible = "moxa,moxart-watchdog" }, |
169 | { }, | 169 | { }, |
170 | }; | 170 | }; |
171 | MODULE_DEVICE_TABLE(of, moxart_watchdog_match); | ||
171 | 172 | ||
172 | static struct platform_driver moxart_wdt_driver = { | 173 | static struct platform_driver moxart_wdt_driver = { |
173 | .probe = moxart_wdt_probe, | 174 | .probe = moxart_wdt_probe, |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 22ea424ee741..073bb57adab1 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1242,6 +1242,13 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1242 | goto out_clear; | 1242 | goto out_clear; |
1243 | } | 1243 | } |
1244 | bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); | 1244 | bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); |
1245 | /* | ||
1246 | * If the partition is not aligned on a page | ||
1247 | * boundary, we can't do dax I/O to it. | ||
1248 | */ | ||
1249 | if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) || | ||
1250 | (bdev->bd_part->nr_sects % (PAGE_SIZE / 512))) | ||
1251 | bdev->bd_inode->i_flags &= ~S_DAX; | ||
1245 | } | 1252 | } |
1246 | } else { | 1253 | } else { |
1247 | if (bdev->bd_contains == bdev) { | 1254 | if (bdev->bd_contains == bdev) { |
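The __blkdev_get() hunk clears S_DAX when a partition is not page-aligned. With 4 KiB pages and 512-byte sectors, that means both the start sector and the length must be multiples of 8. A sketch of the rule (the constant assumes a 4 KiB page size):

#include <stdbool.h>
#include <stdint.h>

#define SECTORS_PER_PAGE (4096 / 512)	/* 8 sectors per 4 KiB page */

static bool partition_dax_ok(uint64_t start_sect, uint64_t nr_sects)
{
	return (start_sect % SECTORS_PER_PAGE) == 0 &&
	       (nr_sects  % SECTORS_PER_PAGE) == 0;
}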
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index ecbc63d3143e..9a2ec79e8cfb 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
@@ -1828,7 +1828,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, | |||
1828 | int found = 0; | 1828 | int found = 0; |
1829 | struct extent_buffer *eb; | 1829 | struct extent_buffer *eb; |
1830 | struct btrfs_inode_extref *extref; | 1830 | struct btrfs_inode_extref *extref; |
1831 | struct extent_buffer *leaf; | ||
1832 | u32 item_size; | 1831 | u32 item_size; |
1833 | u32 cur_offset; | 1832 | u32 cur_offset; |
1834 | unsigned long ptr; | 1833 | unsigned long ptr; |
@@ -1856,9 +1855,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, | |||
1856 | btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); | 1855 | btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); |
1857 | btrfs_release_path(path); | 1856 | btrfs_release_path(path); |
1858 | 1857 | ||
1859 | leaf = path->nodes[0]; | 1858 | item_size = btrfs_item_size_nr(eb, slot); |
1860 | item_size = btrfs_item_size_nr(leaf, slot); | 1859 | ptr = btrfs_item_ptr_offset(eb, slot); |
1861 | ptr = btrfs_item_ptr_offset(leaf, slot); | ||
1862 | cur_offset = 0; | 1860 | cur_offset = 0; |
1863 | 1861 | ||
1864 | while (cur_offset < item_size) { | 1862 | while (cur_offset < item_size) { |
@@ -1872,7 +1870,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, | |||
1872 | if (ret) | 1870 | if (ret) |
1873 | break; | 1871 | break; |
1874 | 1872 | ||
1875 | cur_offset += btrfs_inode_extref_name_len(leaf, extref); | 1873 | cur_offset += btrfs_inode_extref_name_len(eb, extref); |
1876 | cur_offset += sizeof(*extref); | 1874 | cur_offset += sizeof(*extref); |
1877 | } | 1875 | } |
1878 | btrfs_tree_read_unlock_blocking(eb); | 1876 | btrfs_tree_read_unlock_blocking(eb); |
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 81220b2203c6..0ef5cc13fae2 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
@@ -44,8 +44,6 @@ | |||
44 | #define BTRFS_INODE_IN_DELALLOC_LIST 9 | 44 | #define BTRFS_INODE_IN_DELALLOC_LIST 9 |
45 | #define BTRFS_INODE_READDIO_NEED_LOCK 10 | 45 | #define BTRFS_INODE_READDIO_NEED_LOCK 10 |
46 | #define BTRFS_INODE_HAS_PROPS 11 | 46 | #define BTRFS_INODE_HAS_PROPS 11 |
47 | /* DIO is ready to submit */ | ||
48 | #define BTRFS_INODE_DIO_READY 12 | ||
49 | /* | 47 | /* |
50 | * The following 3 bits are meant only for the btree inode. | 48 | * The following 3 bits are meant only for the btree inode. |
51 | * When any of them is set, it means an error happened while writing an | 49 | * When any of them is set, it means an error happened while writing an |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0d98aee34fee..1e60d00d4ea7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -2847,6 +2847,8 @@ int open_ctree(struct super_block *sb, | |||
2847 | !extent_buffer_uptodate(chunk_root->node)) { | 2847 | !extent_buffer_uptodate(chunk_root->node)) { |
2848 | printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", | 2848 | printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", |
2849 | sb->s_id); | 2849 | sb->s_id); |
2850 | if (!IS_ERR(chunk_root->node)) | ||
2851 | free_extent_buffer(chunk_root->node); | ||
2850 | chunk_root->node = NULL; | 2852 | chunk_root->node = NULL; |
2851 | goto fail_tree_roots; | 2853 | goto fail_tree_roots; |
2852 | } | 2854 | } |
@@ -2885,6 +2887,8 @@ retry_root_backup: | |||
2885 | !extent_buffer_uptodate(tree_root->node)) { | 2887 | !extent_buffer_uptodate(tree_root->node)) { |
2886 | printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", | 2888 | printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", |
2887 | sb->s_id); | 2889 | sb->s_id); |
2890 | if (!IS_ERR(tree_root->node)) | ||
2891 | free_extent_buffer(tree_root->node); | ||
2888 | tree_root->node = NULL; | 2892 | tree_root->node = NULL; |
2889 | goto recovery_tree_root; | 2893 | goto recovery_tree_root; |
2890 | } | 2894 | } |
@@ -3765,9 +3769,7 @@ void close_ctree(struct btrfs_root *root) | |||
3765 | * block groups queued for removal, the deletion will be | 3769 | * block groups queued for removal, the deletion will be |
3766 | * skipped when we quit the cleaner thread. | 3770 | * skipped when we quit the cleaner thread. |
3767 | */ | 3771 | */ |
3768 | mutex_lock(&root->fs_info->cleaner_mutex); | ||
3769 | btrfs_delete_unused_bgs(root->fs_info); | 3772 | btrfs_delete_unused_bgs(root->fs_info); |
3770 | mutex_unlock(&root->fs_info->cleaner_mutex); | ||
3771 | 3773 | ||
3772 | ret = btrfs_commit_super(root); | 3774 | ret = btrfs_commit_super(root); |
3773 | if (ret) | 3775 | if (ret) |
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 8d052209f473..2513a7f53334 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c | |||
@@ -112,11 +112,11 @@ static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh, | |||
112 | u32 generation; | 112 | u32 generation; |
113 | 113 | ||
114 | if (fh_type == FILEID_BTRFS_WITH_PARENT) { | 114 | if (fh_type == FILEID_BTRFS_WITH_PARENT) { |
115 | if (fh_len != BTRFS_FID_SIZE_CONNECTABLE) | 115 | if (fh_len < BTRFS_FID_SIZE_CONNECTABLE) |
116 | return NULL; | 116 | return NULL; |
117 | root_objectid = fid->root_objectid; | 117 | root_objectid = fid->root_objectid; |
118 | } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) { | 118 | } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) { |
119 | if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) | 119 | if (fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) |
120 | return NULL; | 120 | return NULL; |
121 | root_objectid = fid->parent_root_objectid; | 121 | root_objectid = fid->parent_root_objectid; |
122 | } else | 122 | } else |
@@ -136,11 +136,11 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, | |||
136 | u32 generation; | 136 | u32 generation; |
137 | 137 | ||
138 | if ((fh_type != FILEID_BTRFS_WITH_PARENT || | 138 | if ((fh_type != FILEID_BTRFS_WITH_PARENT || |
139 | fh_len != BTRFS_FID_SIZE_CONNECTABLE) && | 139 | fh_len < BTRFS_FID_SIZE_CONNECTABLE) && |
140 | (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT || | 140 | (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT || |
141 | fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) && | 141 | fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) && |
142 | (fh_type != FILEID_BTRFS_WITHOUT_PARENT || | 142 | (fh_type != FILEID_BTRFS_WITHOUT_PARENT || |
143 | fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE)) | 143 | fh_len < BTRFS_FID_SIZE_NON_CONNECTABLE)) |
144 | return NULL; | 144 | return NULL; |
145 | 145 | ||
146 | objectid = fid->objectid; | 146 | objectid = fid->objectid; |
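The two hunks above relax the file handle length checks in btrfs_fh_to_parent() and btrfs_fh_to_dentry() from an exact match to a minimum length, presumably so that handles padded out to a larger fixed size by the exporting layer are still accepted. A minimal userspace sketch of the same "at least this long" validation follows; the struct and names are invented for illustration and are not the btrfs ones.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_fid {                     /* stand-in for an on-wire file handle */
    uint64_t objectid;
    uint64_t root_objectid;
    uint32_t gen;
};

/* Accept any handle that is at least as long as the structure we need;
 * rejecting longer (padded) handles with an exact-length check would
 * turn otherwise valid handles into lookup failures. */
static const struct demo_fid *parse_fid(const void *buf, size_t len)
{
    if (len < sizeof(struct demo_fid))
        return NULL;
    return (const struct demo_fid *)buf;
}

int main(void)
{
    struct demo_fid fid = { 256, 5, 1 };
    unsigned char padded[sizeof(fid) + 8] = { 0 };   /* simulated padded handle */

    memcpy(padded, &fid, sizeof(fid));
    printf("exact length:  %s\n", parse_fid(&fid, sizeof(fid)) ? "ok" : "rejected");
    printf("padded length: %s\n", parse_fid(padded, sizeof(padded)) ? "ok" : "rejected");
    return 0;
}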
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 5411f0ab5683..601d7d45d164 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -2828,6 +2828,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
2828 | struct btrfs_delayed_ref_head *head; | 2828 | struct btrfs_delayed_ref_head *head; |
2829 | int ret; | 2829 | int ret; |
2830 | int run_all = count == (unsigned long)-1; | 2830 | int run_all = count == (unsigned long)-1; |
2831 | bool can_flush_pending_bgs = trans->can_flush_pending_bgs; | ||
2831 | 2832 | ||
2832 | /* We'll clean this up in btrfs_cleanup_transaction */ | 2833 | /* We'll clean this up in btrfs_cleanup_transaction */ |
2833 | if (trans->aborted) | 2834 | if (trans->aborted) |
@@ -2844,6 +2845,7 @@ again: | |||
2844 | #ifdef SCRAMBLE_DELAYED_REFS | 2845 | #ifdef SCRAMBLE_DELAYED_REFS |
2845 | delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); | 2846 | delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); |
2846 | #endif | 2847 | #endif |
2848 | trans->can_flush_pending_bgs = false; | ||
2847 | ret = __btrfs_run_delayed_refs(trans, root, count); | 2849 | ret = __btrfs_run_delayed_refs(trans, root, count); |
2848 | if (ret < 0) { | 2850 | if (ret < 0) { |
2849 | btrfs_abort_transaction(trans, root, ret); | 2851 | btrfs_abort_transaction(trans, root, ret); |
@@ -2893,6 +2895,7 @@ again: | |||
2893 | } | 2895 | } |
2894 | out: | 2896 | out: |
2895 | assert_qgroups_uptodate(trans); | 2897 | assert_qgroups_uptodate(trans); |
2898 | trans->can_flush_pending_bgs = can_flush_pending_bgs; | ||
2896 | return 0; | 2899 | return 0; |
2897 | } | 2900 | } |
2898 | 2901 | ||
@@ -3742,10 +3745,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, | |||
3742 | found->bytes_reserved = 0; | 3745 | found->bytes_reserved = 0; |
3743 | found->bytes_readonly = 0; | 3746 | found->bytes_readonly = 0; |
3744 | found->bytes_may_use = 0; | 3747 | found->bytes_may_use = 0; |
3745 | if (total_bytes > 0) | 3748 | found->full = 0; |
3746 | found->full = 0; | ||
3747 | else | ||
3748 | found->full = 1; | ||
3749 | found->force_alloc = CHUNK_ALLOC_NO_FORCE; | 3749 | found->force_alloc = CHUNK_ALLOC_NO_FORCE; |
3750 | found->chunk_alloc = 0; | 3750 | found->chunk_alloc = 0; |
3751 | found->flush = 0; | 3751 | found->flush = 0; |
@@ -4309,7 +4309,8 @@ out: | |||
4309 | * the block groups that were made dirty during the lifetime of the | 4309 | * the block groups that were made dirty during the lifetime of the |
4310 | * transaction. | 4310 | * transaction. |
4311 | */ | 4311 | */ |
4312 | if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { | 4312 | if (trans->can_flush_pending_bgs && |
4313 | trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { | ||
4313 | btrfs_create_pending_block_groups(trans, trans->root); | 4314 | btrfs_create_pending_block_groups(trans, trans->root); |
4314 | btrfs_trans_release_chunk_metadata(trans); | 4315 | btrfs_trans_release_chunk_metadata(trans); |
4315 | } | 4316 | } |
@@ -8668,7 +8669,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, | |||
8668 | } | 8669 | } |
8669 | 8670 | ||
8670 | if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) { | 8671 | if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) { |
8671 | btrfs_drop_and_free_fs_root(tree_root->fs_info, root); | 8672 | btrfs_add_dropped_root(trans, root); |
8672 | } else { | 8673 | } else { |
8673 | free_extent_buffer(root->node); | 8674 | free_extent_buffer(root->node); |
8674 | free_extent_buffer(root->commit_root); | 8675 | free_extent_buffer(root->commit_root); |
@@ -9563,7 +9564,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, | |||
9563 | struct btrfs_block_group_item item; | 9564 | struct btrfs_block_group_item item; |
9564 | struct btrfs_key key; | 9565 | struct btrfs_key key; |
9565 | int ret = 0; | 9566 | int ret = 0; |
9567 | bool can_flush_pending_bgs = trans->can_flush_pending_bgs; | ||
9566 | 9568 | ||
9569 | trans->can_flush_pending_bgs = false; | ||
9567 | list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { | 9570 | list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { |
9568 | if (ret) | 9571 | if (ret) |
9569 | goto next; | 9572 | goto next; |
@@ -9584,6 +9587,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, | |||
9584 | next: | 9587 | next: |
9585 | list_del_init(&block_group->bg_list); | 9588 | list_del_init(&block_group->bg_list); |
9586 | } | 9589 | } |
9590 | trans->can_flush_pending_bgs = can_flush_pending_bgs; | ||
9587 | } | 9591 | } |
9588 | 9592 | ||
9589 | int btrfs_make_block_group(struct btrfs_trans_handle *trans, | 9593 | int btrfs_make_block_group(struct btrfs_trans_handle *trans, |
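The can_flush_pending_bgs changes above follow a save/clear/restore pattern: btrfs_run_delayed_refs() and btrfs_create_pending_block_groups() each remember the flag, clear it while they run, and restore it afterwards, and the flush site now checks the flag before calling btrfs_create_pending_block_groups(), apparently so these paths cannot re-enter pending block group creation while it (or delayed ref processing) is already underway. A stripped-down userspace sketch of that re-entrancy guard, with made-up names:

#include <stdbool.h>
#include <stdio.h>

struct demo_trans {
    bool can_flush_pending_bgs;       /* may we flush new block groups here? */
};

static void create_pending_bgs(struct demo_trans *t);

/* Running refs may want to flush pending block groups, but flushing them
 * can queue more work; clearing the flag for the duration breaks the cycle. */
static void run_delayed_refs(struct demo_trans *t)
{
    bool saved = t->can_flush_pending_bgs;

    t->can_flush_pending_bgs = false;
    if (saved)
        create_pending_bgs(t);        /* safe: nested callers see false */
    t->can_flush_pending_bgs = saved; /* restore the caller's setting */
}

static void create_pending_bgs(struct demo_trans *t)
{
    bool saved = t->can_flush_pending_bgs;

    t->can_flush_pending_bgs = false;
    printf("creating block groups (nested flush allowed: %d)\n",
           t->can_flush_pending_bgs);
    t->can_flush_pending_bgs = saved;
}

int main(void)
{
    struct demo_trans t = { .can_flush_pending_bgs = true };

    run_delayed_refs(&t);
    printf("flag restored: %d\n", t.can_flush_pending_bgs);
    return 0;
}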
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f1018cfbfefa..3915c9473e94 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -2798,7 +2798,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, | |||
2798 | bio_end_io_t end_io_func, | 2798 | bio_end_io_t end_io_func, |
2799 | int mirror_num, | 2799 | int mirror_num, |
2800 | unsigned long prev_bio_flags, | 2800 | unsigned long prev_bio_flags, |
2801 | unsigned long bio_flags) | 2801 | unsigned long bio_flags, |
2802 | bool force_bio_submit) | ||
2802 | { | 2803 | { |
2803 | int ret = 0; | 2804 | int ret = 0; |
2804 | struct bio *bio; | 2805 | struct bio *bio; |
@@ -2814,6 +2815,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, | |||
2814 | contig = bio_end_sector(bio) == sector; | 2815 | contig = bio_end_sector(bio) == sector; |
2815 | 2816 | ||
2816 | if (prev_bio_flags != bio_flags || !contig || | 2817 | if (prev_bio_flags != bio_flags || !contig || |
2818 | force_bio_submit || | ||
2817 | merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || | 2819 | merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || |
2818 | bio_add_page(bio, page, page_size, offset) < page_size) { | 2820 | bio_add_page(bio, page, page_size, offset) < page_size) { |
2819 | ret = submit_one_bio(rw, bio, mirror_num, | 2821 | ret = submit_one_bio(rw, bio, mirror_num, |
@@ -2910,7 +2912,8 @@ static int __do_readpage(struct extent_io_tree *tree, | |||
2910 | get_extent_t *get_extent, | 2912 | get_extent_t *get_extent, |
2911 | struct extent_map **em_cached, | 2913 | struct extent_map **em_cached, |
2912 | struct bio **bio, int mirror_num, | 2914 | struct bio **bio, int mirror_num, |
2913 | unsigned long *bio_flags, int rw) | 2915 | unsigned long *bio_flags, int rw, |
2916 | u64 *prev_em_start) | ||
2914 | { | 2917 | { |
2915 | struct inode *inode = page->mapping->host; | 2918 | struct inode *inode = page->mapping->host; |
2916 | u64 start = page_offset(page); | 2919 | u64 start = page_offset(page); |
@@ -2958,6 +2961,7 @@ static int __do_readpage(struct extent_io_tree *tree, | |||
2958 | } | 2961 | } |
2959 | while (cur <= end) { | 2962 | while (cur <= end) { |
2960 | unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; | 2963 | unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; |
2964 | bool force_bio_submit = false; | ||
2961 | 2965 | ||
2962 | if (cur >= last_byte) { | 2966 | if (cur >= last_byte) { |
2963 | char *userpage; | 2967 | char *userpage; |
@@ -3008,6 +3012,49 @@ static int __do_readpage(struct extent_io_tree *tree, | |||
3008 | block_start = em->block_start; | 3012 | block_start = em->block_start; |
3009 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | 3013 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) |
3010 | block_start = EXTENT_MAP_HOLE; | 3014 | block_start = EXTENT_MAP_HOLE; |
3015 | |||
3016 | /* | ||
3017 | * If we have a file range that points to a compressed extent | ||
3018 | * and it's followed by a consecutive file range that points to | ||
3019 | * the same compressed extent (possibly with a different | ||
3020 | * offset and/or length, so it either points to the whole extent | ||
3021 | * or only part of it), we must make sure we do not submit a | ||
3022 | * single bio to populate the pages for the 2 ranges because | ||
3023 | * this makes the compressed extent read zero out the pages | ||
3024 | * belonging to the 2nd range. Imagine the following scenario: | ||
3025 | * | ||
3026 | * File layout | ||
3027 | * [0 - 8K] [8K - 24K] | ||
3028 | * | | | ||
3029 | * | | | ||
3030 | * points to extent X, points to extent X, | ||
3031 | * offset 4K, length of 8K offset 0, length 16K | ||
3032 | * | ||
3033 | * [extent X, compressed length = 4K uncompressed length = 16K] | ||
3034 | * | ||
3035 | * If the bio to read the compressed extent covers both ranges, | ||
3036 | * it will decompress extent X into the pages belonging to the | ||
3037 | * first range and then it will stop, zeroing out the remaining | ||
3038 | * pages that belong to the other range that points to extent X. | ||
3039 | * So here we make sure we submit 2 bios, one for the first | ||
3040 | * range and another one for the second range. Both will target | ||
3041 | * the same physical extent from disk, but we can't currently | ||
3042 | * make the compressed bio endio callback populate the pages | ||
3043 | * for both ranges because each compressed bio is tightly | ||
3044 | * coupled with a single extent map, and each range can have | ||
3045 | * an extent map with a different offset value relative to the | ||
3046 | * uncompressed data of our extent and different lengths. This | ||
3047 | * is a corner case so we prioritize correctness and accept the | ||
3048 | * non-optimal behavior (submitting 2 bios for the same extent). | ||
3049 | */ | ||
3050 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) && | ||
3051 | prev_em_start && *prev_em_start != (u64)-1 && | ||
3052 | *prev_em_start != em->orig_start) | ||
3053 | force_bio_submit = true; | ||
3054 | |||
3055 | if (prev_em_start) | ||
3056 | *prev_em_start = em->orig_start; | ||
3057 | |||
3011 | free_extent_map(em); | 3058 | free_extent_map(em); |
3012 | em = NULL; | 3059 | em = NULL; |
3013 | 3060 | ||
@@ -3057,7 +3104,8 @@ static int __do_readpage(struct extent_io_tree *tree, | |||
3057 | bdev, bio, pnr, | 3104 | bdev, bio, pnr, |
3058 | end_bio_extent_readpage, mirror_num, | 3105 | end_bio_extent_readpage, mirror_num, |
3059 | *bio_flags, | 3106 | *bio_flags, |
3060 | this_bio_flag); | 3107 | this_bio_flag, |
3108 | force_bio_submit); | ||
3061 | if (!ret) { | 3109 | if (!ret) { |
3062 | nr++; | 3110 | nr++; |
3063 | *bio_flags = this_bio_flag; | 3111 | *bio_flags = this_bio_flag; |
@@ -3084,7 +3132,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, | |||
3084 | get_extent_t *get_extent, | 3132 | get_extent_t *get_extent, |
3085 | struct extent_map **em_cached, | 3133 | struct extent_map **em_cached, |
3086 | struct bio **bio, int mirror_num, | 3134 | struct bio **bio, int mirror_num, |
3087 | unsigned long *bio_flags, int rw) | 3135 | unsigned long *bio_flags, int rw, |
3136 | u64 *prev_em_start) | ||
3088 | { | 3137 | { |
3089 | struct inode *inode; | 3138 | struct inode *inode; |
3090 | struct btrfs_ordered_extent *ordered; | 3139 | struct btrfs_ordered_extent *ordered; |
@@ -3104,7 +3153,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, | |||
3104 | 3153 | ||
3105 | for (index = 0; index < nr_pages; index++) { | 3154 | for (index = 0; index < nr_pages; index++) { |
3106 | __do_readpage(tree, pages[index], get_extent, em_cached, bio, | 3155 | __do_readpage(tree, pages[index], get_extent, em_cached, bio, |
3107 | mirror_num, bio_flags, rw); | 3156 | mirror_num, bio_flags, rw, prev_em_start); |
3108 | page_cache_release(pages[index]); | 3157 | page_cache_release(pages[index]); |
3109 | } | 3158 | } |
3110 | } | 3159 | } |
@@ -3114,7 +3163,8 @@ static void __extent_readpages(struct extent_io_tree *tree, | |||
3114 | int nr_pages, get_extent_t *get_extent, | 3163 | int nr_pages, get_extent_t *get_extent, |
3115 | struct extent_map **em_cached, | 3164 | struct extent_map **em_cached, |
3116 | struct bio **bio, int mirror_num, | 3165 | struct bio **bio, int mirror_num, |
3117 | unsigned long *bio_flags, int rw) | 3166 | unsigned long *bio_flags, int rw, |
3167 | u64 *prev_em_start) | ||
3118 | { | 3168 | { |
3119 | u64 start = 0; | 3169 | u64 start = 0; |
3120 | u64 end = 0; | 3170 | u64 end = 0; |
@@ -3135,7 +3185,7 @@ static void __extent_readpages(struct extent_io_tree *tree, | |||
3135 | index - first_index, start, | 3185 | index - first_index, start, |
3136 | end, get_extent, em_cached, | 3186 | end, get_extent, em_cached, |
3137 | bio, mirror_num, bio_flags, | 3187 | bio, mirror_num, bio_flags, |
3138 | rw); | 3188 | rw, prev_em_start); |
3139 | start = page_start; | 3189 | start = page_start; |
3140 | end = start + PAGE_CACHE_SIZE - 1; | 3190 | end = start + PAGE_CACHE_SIZE - 1; |
3141 | first_index = index; | 3191 | first_index = index; |
@@ -3146,7 +3196,8 @@ static void __extent_readpages(struct extent_io_tree *tree, | |||
3146 | __do_contiguous_readpages(tree, &pages[first_index], | 3196 | __do_contiguous_readpages(tree, &pages[first_index], |
3147 | index - first_index, start, | 3197 | index - first_index, start, |
3148 | end, get_extent, em_cached, bio, | 3198 | end, get_extent, em_cached, bio, |
3149 | mirror_num, bio_flags, rw); | 3199 | mirror_num, bio_flags, rw, |
3200 | prev_em_start); | ||
3150 | } | 3201 | } |
3151 | 3202 | ||
3152 | static int __extent_read_full_page(struct extent_io_tree *tree, | 3203 | static int __extent_read_full_page(struct extent_io_tree *tree, |
@@ -3172,7 +3223,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
3172 | } | 3223 | } |
3173 | 3224 | ||
3174 | ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, | 3225 | ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, |
3175 | bio_flags, rw); | 3226 | bio_flags, rw, NULL); |
3176 | return ret; | 3227 | return ret; |
3177 | } | 3228 | } |
3178 | 3229 | ||
@@ -3198,7 +3249,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page, | |||
3198 | int ret; | 3249 | int ret; |
3199 | 3250 | ||
3200 | ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, | 3251 | ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, |
3201 | &bio_flags, READ); | 3252 | &bio_flags, READ, NULL); |
3202 | if (bio) | 3253 | if (bio) |
3203 | ret = submit_one_bio(READ, bio, mirror_num, bio_flags); | 3254 | ret = submit_one_bio(READ, bio, mirror_num, bio_flags); |
3204 | return ret; | 3255 | return ret; |
@@ -3451,7 +3502,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, | |||
3451 | sector, iosize, pg_offset, | 3502 | sector, iosize, pg_offset, |
3452 | bdev, &epd->bio, max_nr, | 3503 | bdev, &epd->bio, max_nr, |
3453 | end_bio_extent_writepage, | 3504 | end_bio_extent_writepage, |
3454 | 0, 0, 0); | 3505 | 0, 0, 0, false); |
3455 | if (ret) | 3506 | if (ret) |
3456 | SetPageError(page); | 3507 | SetPageError(page); |
3457 | } | 3508 | } |
@@ -3754,7 +3805,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, | |||
3754 | ret = submit_extent_page(rw, tree, wbc, p, offset >> 9, | 3805 | ret = submit_extent_page(rw, tree, wbc, p, offset >> 9, |
3755 | PAGE_CACHE_SIZE, 0, bdev, &epd->bio, | 3806 | PAGE_CACHE_SIZE, 0, bdev, &epd->bio, |
3756 | -1, end_bio_extent_buffer_writepage, | 3807 | -1, end_bio_extent_buffer_writepage, |
3757 | 0, epd->bio_flags, bio_flags); | 3808 | 0, epd->bio_flags, bio_flags, false); |
3758 | epd->bio_flags = bio_flags; | 3809 | epd->bio_flags = bio_flags; |
3759 | if (ret) { | 3810 | if (ret) { |
3760 | set_btree_ioerr(p); | 3811 | set_btree_ioerr(p); |
@@ -4158,6 +4209,7 @@ int extent_readpages(struct extent_io_tree *tree, | |||
4158 | struct page *page; | 4209 | struct page *page; |
4159 | struct extent_map *em_cached = NULL; | 4210 | struct extent_map *em_cached = NULL; |
4160 | int nr = 0; | 4211 | int nr = 0; |
4212 | u64 prev_em_start = (u64)-1; | ||
4161 | 4213 | ||
4162 | for (page_idx = 0; page_idx < nr_pages; page_idx++) { | 4214 | for (page_idx = 0; page_idx < nr_pages; page_idx++) { |
4163 | page = list_entry(pages->prev, struct page, lru); | 4215 | page = list_entry(pages->prev, struct page, lru); |
@@ -4174,12 +4226,12 @@ int extent_readpages(struct extent_io_tree *tree, | |||
4174 | if (nr < ARRAY_SIZE(pagepool)) | 4226 | if (nr < ARRAY_SIZE(pagepool)) |
4175 | continue; | 4227 | continue; |
4176 | __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, | 4228 | __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, |
4177 | &bio, 0, &bio_flags, READ); | 4229 | &bio, 0, &bio_flags, READ, &prev_em_start); |
4178 | nr = 0; | 4230 | nr = 0; |
4179 | } | 4231 | } |
4180 | if (nr) | 4232 | if (nr) |
4181 | __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, | 4233 | __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, |
4182 | &bio, 0, &bio_flags, READ); | 4234 | &bio, 0, &bio_flags, READ, &prev_em_start); |
4183 | 4235 | ||
4184 | if (em_cached) | 4236 | if (em_cached) |
4185 | free_extent_map(em_cached); | 4237 | free_extent_map(em_cached); |
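The prev_em_start cursor threaded through the readpages path above implements the rule described in the long comment: a page range backed by a compressed extent is forced into its own bio (force_bio_submit) whenever its extent map's orig_start differs from that of the previously read range, so a single compressed bio never has to populate pages belonging to two different file ranges. A simplified userspace sketch of just that decision, with invented types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EM_START_UNSET ((uint64_t)-1)

struct demo_em {
    uint64_t orig_start;   /* start of the extent this range maps to */
    bool compressed;
};

/* Decide whether the current range may share a bio with the previous one:
 * a compressed range whose extent map start differs from the previous
 * range's must be submitted separately. */
static bool must_submit_separately(const struct demo_em *em, uint64_t *prev_em_start)
{
    bool force = em->compressed &&
                 *prev_em_start != EM_START_UNSET &&
                 *prev_em_start != em->orig_start;

    *prev_em_start = em->orig_start;   /* remember for the next range */
    return force;
}

int main(void)
{
    uint64_t prev = EM_START_UNSET;
    struct demo_em a = { .orig_start = 0, .compressed = true };
    struct demo_em b = { .orig_start = 4096, .compressed = true };

    printf("first range:  separate bio? %d\n", must_submit_separately(&a, &prev));
    printf("second range: separate bio? %d\n", must_submit_separately(&b, &prev));
    return 0;
}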
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a0fa7253a2d7..611b66d73e80 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -5084,7 +5084,8 @@ void btrfs_evict_inode(struct inode *inode) | |||
5084 | goto no_delete; | 5084 | goto no_delete; |
5085 | } | 5085 | } |
5086 | /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ | 5086 | /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ |
5087 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | 5087 | if (!special_file(inode->i_mode)) |
5088 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | ||
5088 | 5089 | ||
5089 | btrfs_free_io_failure_record(inode, 0, (u64)-1); | 5090 | btrfs_free_io_failure_record(inode, 0, (u64)-1); |
5090 | 5091 | ||
@@ -7408,6 +7409,10 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start, | |||
7408 | return em; | 7409 | return em; |
7409 | } | 7410 | } |
7410 | 7411 | ||
7412 | struct btrfs_dio_data { | ||
7413 | u64 outstanding_extents; | ||
7414 | u64 reserve; | ||
7415 | }; | ||
7411 | 7416 | ||
7412 | static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | 7417 | static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, |
7413 | struct buffer_head *bh_result, int create) | 7418 | struct buffer_head *bh_result, int create) |
@@ -7415,10 +7420,10 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7415 | struct extent_map *em; | 7420 | struct extent_map *em; |
7416 | struct btrfs_root *root = BTRFS_I(inode)->root; | 7421 | struct btrfs_root *root = BTRFS_I(inode)->root; |
7417 | struct extent_state *cached_state = NULL; | 7422 | struct extent_state *cached_state = NULL; |
7423 | struct btrfs_dio_data *dio_data = NULL; | ||
7418 | u64 start = iblock << inode->i_blkbits; | 7424 | u64 start = iblock << inode->i_blkbits; |
7419 | u64 lockstart, lockend; | 7425 | u64 lockstart, lockend; |
7420 | u64 len = bh_result->b_size; | 7426 | u64 len = bh_result->b_size; |
7421 | u64 *outstanding_extents = NULL; | ||
7422 | int unlock_bits = EXTENT_LOCKED; | 7427 | int unlock_bits = EXTENT_LOCKED; |
7423 | int ret = 0; | 7428 | int ret = 0; |
7424 | 7429 | ||
@@ -7436,7 +7441,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7436 | * that anything that needs to check if there's a transaction doesn't get | 7441 | * that anything that needs to check if there's a transaction doesn't get |
7437 | * confused. | 7442 | * confused. |
7438 | */ | 7443 | */ |
7439 | outstanding_extents = current->journal_info; | 7444 | dio_data = current->journal_info; |
7440 | current->journal_info = NULL; | 7445 | current->journal_info = NULL; |
7441 | } | 7446 | } |
7442 | 7447 | ||
@@ -7568,17 +7573,18 @@ unlock: | |||
7568 | * within our reservation, otherwise we need to adjust our inode | 7573 | * within our reservation, otherwise we need to adjust our inode |
7569 | * counter appropriately. | 7574 | * counter appropriately. |
7570 | */ | 7575 | */ |
7571 | if (*outstanding_extents) { | 7576 | if (dio_data->outstanding_extents) { |
7572 | (*outstanding_extents)--; | 7577 | (dio_data->outstanding_extents)--; |
7573 | } else { | 7578 | } else { |
7574 | spin_lock(&BTRFS_I(inode)->lock); | 7579 | spin_lock(&BTRFS_I(inode)->lock); |
7575 | BTRFS_I(inode)->outstanding_extents++; | 7580 | BTRFS_I(inode)->outstanding_extents++; |
7576 | spin_unlock(&BTRFS_I(inode)->lock); | 7581 | spin_unlock(&BTRFS_I(inode)->lock); |
7577 | } | 7582 | } |
7578 | 7583 | ||
7579 | current->journal_info = outstanding_extents; | ||
7580 | btrfs_free_reserved_data_space(inode, len); | 7584 | btrfs_free_reserved_data_space(inode, len); |
7581 | set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags); | 7585 | WARN_ON(dio_data->reserve < len); |
7586 | dio_data->reserve -= len; | ||
7587 | current->journal_info = dio_data; | ||
7582 | } | 7588 | } |
7583 | 7589 | ||
7584 | /* | 7590 | /* |
@@ -7601,8 +7607,8 @@ unlock: | |||
7601 | unlock_err: | 7607 | unlock_err: |
7602 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, | 7608 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, |
7603 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); | 7609 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); |
7604 | if (outstanding_extents) | 7610 | if (dio_data) |
7605 | current->journal_info = outstanding_extents; | 7611 | current->journal_info = dio_data; |
7606 | return ret; | 7612 | return ret; |
7607 | } | 7613 | } |
7608 | 7614 | ||
@@ -8329,7 +8335,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, | |||
8329 | { | 8335 | { |
8330 | struct file *file = iocb->ki_filp; | 8336 | struct file *file = iocb->ki_filp; |
8331 | struct inode *inode = file->f_mapping->host; | 8337 | struct inode *inode = file->f_mapping->host; |
8332 | u64 outstanding_extents = 0; | 8338 | struct btrfs_root *root = BTRFS_I(inode)->root; |
8339 | struct btrfs_dio_data dio_data = { 0 }; | ||
8333 | size_t count = 0; | 8340 | size_t count = 0; |
8334 | int flags = 0; | 8341 | int flags = 0; |
8335 | bool wakeup = true; | 8342 | bool wakeup = true; |
@@ -8367,7 +8374,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, | |||
8367 | ret = btrfs_delalloc_reserve_space(inode, count); | 8374 | ret = btrfs_delalloc_reserve_space(inode, count); |
8368 | if (ret) | 8375 | if (ret) |
8369 | goto out; | 8376 | goto out; |
8370 | outstanding_extents = div64_u64(count + | 8377 | dio_data.outstanding_extents = div64_u64(count + |
8371 | BTRFS_MAX_EXTENT_SIZE - 1, | 8378 | BTRFS_MAX_EXTENT_SIZE - 1, |
8372 | BTRFS_MAX_EXTENT_SIZE); | 8379 | BTRFS_MAX_EXTENT_SIZE); |
8373 | 8380 | ||
@@ -8376,7 +8383,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, | |||
8376 | * do the accounting properly if we go over the number we | 8383 | * do the accounting properly if we go over the number we |
8377 | * originally calculated. Abuse current->journal_info for this. | 8384 | * originally calculated. Abuse current->journal_info for this. |
8378 | */ | 8385 | */ |
8379 | current->journal_info = &outstanding_extents; | 8386 | dio_data.reserve = round_up(count, root->sectorsize); |
8387 | current->journal_info = &dio_data; | ||
8380 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, | 8388 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, |
8381 | &BTRFS_I(inode)->runtime_flags)) { | 8389 | &BTRFS_I(inode)->runtime_flags)) { |
8382 | inode_dio_end(inode); | 8390 | inode_dio_end(inode); |
@@ -8391,16 +8399,9 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, | |||
8391 | if (iov_iter_rw(iter) == WRITE) { | 8399 | if (iov_iter_rw(iter) == WRITE) { |
8392 | current->journal_info = NULL; | 8400 | current->journal_info = NULL; |
8393 | if (ret < 0 && ret != -EIOCBQUEUED) { | 8401 | if (ret < 0 && ret != -EIOCBQUEUED) { |
8394 | /* | 8402 | if (dio_data.reserve) |
8395 | * If the error comes from submitting stage, | 8403 | btrfs_delalloc_release_space(inode, |
8396 | * btrfs_get_blocsk_direct() has free'd data space, | 8404 | dio_data.reserve); |
8397 | * and metadata space will be handled by | ||
8398 | * finish_ordered_fn, don't do that again to make | ||
8399 | * sure bytes_may_use is correct. | ||
8400 | */ | ||
8401 | if (!test_and_clear_bit(BTRFS_INODE_DIO_READY, | ||
8402 | &BTRFS_I(inode)->runtime_flags)) | ||
8403 | btrfs_delalloc_release_space(inode, count); | ||
8404 | } else if (ret >= 0 && (size_t)ret < count) | 8405 | } else if (ret >= 0 && (size_t)ret < count) |
8405 | btrfs_delalloc_release_space(inode, | 8406 | btrfs_delalloc_release_space(inode, |
8406 | count - (size_t)ret); | 8407 | count - (size_t)ret); |
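The direct IO changes above replace the bare outstanding-extents pointer passed through current->journal_info with a small struct btrfs_dio_data that also tracks how many reserved bytes are still unused; btrfs_get_blocks_direct() decrements dio_data->reserve as it maps ranges, and a failed write now releases only that remainder instead of the full count. A rough userspace sketch of the bookkeeping, with no real reservation or allocation and simplified names:

#include <stdint.h>
#include <stdio.h>

struct demo_dio_data {
    uint64_t outstanding_extents;  /* extents we pre-accounted for */
    uint64_t reserve;              /* bytes of data space still reserved */
};

/* Called once per mapped range: consume part of the reservation. */
static void map_range(struct demo_dio_data *d, uint64_t len)
{
    if (d->outstanding_extents)
        d->outstanding_extents--;      /* covered by the original estimate */
    if (d->reserve < len)
        len = d->reserve;              /* never underflow the counter */
    d->reserve -= len;
}

int main(void)
{
    uint64_t count = 3 * 128 * 1024;   /* bytes submitted for direct IO */
    struct demo_dio_data d = {
        .outstanding_extents = 3,      /* roughly count / max extent size */
        .reserve = count,
    };

    map_range(&d, 128 * 1024);
    map_range(&d, 128 * 1024);
    /* The write failed past this point: release only what was never mapped. */
    printf("release %llu bytes of unused reservation\n",
           (unsigned long long)d.reserve);
    return 0;
}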
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 0adf5422fce9..3e3e6130637f 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -4639,6 +4639,11 @@ locked: | |||
4639 | bctl->flags |= BTRFS_BALANCE_TYPE_MASK; | 4639 | bctl->flags |= BTRFS_BALANCE_TYPE_MASK; |
4640 | } | 4640 | } |
4641 | 4641 | ||
4642 | if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) { | ||
4643 | ret = -EINVAL; | ||
4644 | goto out_bargs; | ||
4645 | } | ||
4646 | |||
4642 | do_balance: | 4647 | do_balance: |
4643 | /* | 4648 | /* |
4644 | * Ownership of bctl and mutually_exclusive_operation_running | 4649 | * Ownership of bctl and mutually_exclusive_operation_running |
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index aa72bfd28f7d..a739b825bdd3 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c | |||
@@ -1920,10 +1920,12 @@ static int did_overwrite_ref(struct send_ctx *sctx, | |||
1920 | /* | 1920 | /* |
1921 | * We know that it is or will be overwritten. Check this now. | 1921 | * We know that it is or will be overwritten. Check this now. |
1922 | * The current inode being processed might have been the one that caused | 1922 | * The current inode being processed might have been the one that caused |
1923 | * inode 'ino' to be orphanized, therefore ow_inode can actually be the | 1923 | * inode 'ino' to be orphanized, therefore check if ow_inode matches |
1924 | * same as sctx->send_progress. | 1924 | * the current inode being processed. |
1925 | */ | 1925 | */ |
1926 | if (ow_inode <= sctx->send_progress) | 1926 | if ((ow_inode < sctx->send_progress) || |
1927 | (ino != sctx->cur_ino && ow_inode == sctx->cur_ino && | ||
1928 | gen == sctx->cur_inode_gen)) | ||
1927 | ret = 1; | 1929 | ret = 1; |
1928 | else | 1930 | else |
1929 | ret = 0; | 1931 | ret = 0; |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2b07b3581781..11d1eab9234d 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -1658,9 +1658,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) | |||
1658 | * groups on disk until we're mounted read-write again | 1658 | * groups on disk until we're mounted read-write again |
1659 | * unless we clean them up here. | 1659 | * unless we clean them up here. |
1660 | */ | 1660 | */ |
1661 | mutex_lock(&root->fs_info->cleaner_mutex); | ||
1662 | btrfs_delete_unused_bgs(fs_info); | 1661 | btrfs_delete_unused_bgs(fs_info); |
1663 | mutex_unlock(&root->fs_info->cleaner_mutex); | ||
1664 | 1662 | ||
1665 | btrfs_dev_replace_suspend_for_unmount(fs_info); | 1663 | btrfs_dev_replace_suspend_for_unmount(fs_info); |
1666 | btrfs_scrub_cancel(fs_info); | 1664 | btrfs_scrub_cancel(fs_info); |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 8f259b3a66b3..a5b06442f0bf 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -117,6 +117,18 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans, | |||
117 | btrfs_unpin_free_ino(root); | 117 | btrfs_unpin_free_ino(root); |
118 | clear_btree_io_tree(&root->dirty_log_pages); | 118 | clear_btree_io_tree(&root->dirty_log_pages); |
119 | } | 119 | } |
120 | |||
121 | /* We can free old roots now. */ | ||
122 | spin_lock(&trans->dropped_roots_lock); | ||
123 | while (!list_empty(&trans->dropped_roots)) { | ||
124 | root = list_first_entry(&trans->dropped_roots, | ||
125 | struct btrfs_root, root_list); | ||
126 | list_del_init(&root->root_list); | ||
127 | spin_unlock(&trans->dropped_roots_lock); | ||
128 | btrfs_drop_and_free_fs_root(fs_info, root); | ||
129 | spin_lock(&trans->dropped_roots_lock); | ||
130 | } | ||
131 | spin_unlock(&trans->dropped_roots_lock); | ||
120 | up_write(&fs_info->commit_root_sem); | 132 | up_write(&fs_info->commit_root_sem); |
121 | } | 133 | } |
122 | 134 | ||
@@ -255,11 +267,13 @@ loop: | |||
255 | INIT_LIST_HEAD(&cur_trans->pending_ordered); | 267 | INIT_LIST_HEAD(&cur_trans->pending_ordered); |
256 | INIT_LIST_HEAD(&cur_trans->dirty_bgs); | 268 | INIT_LIST_HEAD(&cur_trans->dirty_bgs); |
257 | INIT_LIST_HEAD(&cur_trans->io_bgs); | 269 | INIT_LIST_HEAD(&cur_trans->io_bgs); |
270 | INIT_LIST_HEAD(&cur_trans->dropped_roots); | ||
258 | mutex_init(&cur_trans->cache_write_mutex); | 271 | mutex_init(&cur_trans->cache_write_mutex); |
259 | cur_trans->num_dirty_bgs = 0; | 272 | cur_trans->num_dirty_bgs = 0; |
260 | spin_lock_init(&cur_trans->dirty_bgs_lock); | 273 | spin_lock_init(&cur_trans->dirty_bgs_lock); |
261 | INIT_LIST_HEAD(&cur_trans->deleted_bgs); | 274 | INIT_LIST_HEAD(&cur_trans->deleted_bgs); |
262 | spin_lock_init(&cur_trans->deleted_bgs_lock); | 275 | spin_lock_init(&cur_trans->deleted_bgs_lock); |
276 | spin_lock_init(&cur_trans->dropped_roots_lock); | ||
263 | list_add_tail(&cur_trans->list, &fs_info->trans_list); | 277 | list_add_tail(&cur_trans->list, &fs_info->trans_list); |
264 | extent_io_tree_init(&cur_trans->dirty_pages, | 278 | extent_io_tree_init(&cur_trans->dirty_pages, |
265 | fs_info->btree_inode->i_mapping); | 279 | fs_info->btree_inode->i_mapping); |
@@ -336,6 +350,24 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans, | |||
336 | } | 350 | } |
337 | 351 | ||
338 | 352 | ||
353 | void btrfs_add_dropped_root(struct btrfs_trans_handle *trans, | ||
354 | struct btrfs_root *root) | ||
355 | { | ||
356 | struct btrfs_transaction *cur_trans = trans->transaction; | ||
357 | |||
358 | /* Add ourselves to the transaction dropped list */ | ||
359 | spin_lock(&cur_trans->dropped_roots_lock); | ||
360 | list_add_tail(&root->root_list, &cur_trans->dropped_roots); | ||
361 | spin_unlock(&cur_trans->dropped_roots_lock); | ||
362 | |||
363 | /* Make sure we don't try to update the root at commit time */ | ||
364 | spin_lock(&root->fs_info->fs_roots_radix_lock); | ||
365 | radix_tree_tag_clear(&root->fs_info->fs_roots_radix, | ||
366 | (unsigned long)root->root_key.objectid, | ||
367 | BTRFS_ROOT_TRANS_TAG); | ||
368 | spin_unlock(&root->fs_info->fs_roots_radix_lock); | ||
369 | } | ||
370 | |||
339 | int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, | 371 | int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, |
340 | struct btrfs_root *root) | 372 | struct btrfs_root *root) |
341 | { | 373 | { |
@@ -525,6 +557,7 @@ again: | |||
525 | h->delayed_ref_elem.seq = 0; | 557 | h->delayed_ref_elem.seq = 0; |
526 | h->type = type; | 558 | h->type = type; |
527 | h->allocating_chunk = false; | 559 | h->allocating_chunk = false; |
560 | h->can_flush_pending_bgs = true; | ||
528 | h->reloc_reserved = false; | 561 | h->reloc_reserved = false; |
529 | h->sync = false; | 562 | h->sync = false; |
530 | INIT_LIST_HEAD(&h->qgroup_ref_list); | 563 | INIT_LIST_HEAD(&h->qgroup_ref_list); |
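With btrfs_add_dropped_root(), a deleted snapshot's root is no longer freed on the spot: it is queued on the transaction's dropped_roots list (and its TRANS radix tag is cleared so commit will not try to update it), and switch_commit_roots() later drains that list under dropped_roots_lock, dropping the lock around each free. A minimal userspace sketch of the same deferred-free pattern, using a mutex where the kernel uses a spinlock and invented structures:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_root {
    int id;
    struct demo_root *next;
};

static struct demo_root *dropped_roots;                 /* per-transaction list */
static pthread_mutex_t dropped_lock = PTHREAD_MUTEX_INITIALIZER;

/* Drop path: queue the root instead of freeing it immediately. */
static void add_dropped_root(struct demo_root *root)
{
    pthread_mutex_lock(&dropped_lock);
    root->next = dropped_roots;
    dropped_roots = root;
    pthread_mutex_unlock(&dropped_lock);
}

/* Commit path: now it is safe to free everything that was dropped. */
static void free_dropped_roots(void)
{
    pthread_mutex_lock(&dropped_lock);
    while (dropped_roots) {
        struct demo_root *root = dropped_roots;

        dropped_roots = root->next;
        pthread_mutex_unlock(&dropped_lock);
        printf("freeing root %d\n", root->id);
        free(root);                    /* heavy work done outside the lock */
        pthread_mutex_lock(&dropped_lock);
    }
    pthread_mutex_unlock(&dropped_lock);
}

int main(void)
{
    struct demo_root *r = malloc(sizeof(*r));

    r->id = 257;
    add_dropped_root(r);
    free_dropped_roots();
    return 0;
}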
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index edc2fbc262d7..a994bb097ee5 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
@@ -65,6 +65,7 @@ struct btrfs_transaction { | |||
65 | struct list_head switch_commits; | 65 | struct list_head switch_commits; |
66 | struct list_head dirty_bgs; | 66 | struct list_head dirty_bgs; |
67 | struct list_head io_bgs; | 67 | struct list_head io_bgs; |
68 | struct list_head dropped_roots; | ||
68 | u64 num_dirty_bgs; | 69 | u64 num_dirty_bgs; |
69 | 70 | ||
70 | /* | 71 | /* |
@@ -76,6 +77,7 @@ struct btrfs_transaction { | |||
76 | spinlock_t dirty_bgs_lock; | 77 | spinlock_t dirty_bgs_lock; |
77 | struct list_head deleted_bgs; | 78 | struct list_head deleted_bgs; |
78 | spinlock_t deleted_bgs_lock; | 79 | spinlock_t deleted_bgs_lock; |
80 | spinlock_t dropped_roots_lock; | ||
79 | struct btrfs_delayed_ref_root delayed_refs; | 81 | struct btrfs_delayed_ref_root delayed_refs; |
80 | int aborted; | 82 | int aborted; |
81 | int dirty_bg_run; | 83 | int dirty_bg_run; |
@@ -116,6 +118,7 @@ struct btrfs_trans_handle { | |||
116 | short aborted; | 118 | short aborted; |
117 | short adding_csums; | 119 | short adding_csums; |
118 | bool allocating_chunk; | 120 | bool allocating_chunk; |
121 | bool can_flush_pending_bgs; | ||
119 | bool reloc_reserved; | 122 | bool reloc_reserved; |
120 | bool sync; | 123 | bool sync; |
121 | unsigned int type; | 124 | unsigned int type; |
@@ -216,5 +219,6 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info); | |||
216 | int btrfs_transaction_in_commit(struct btrfs_fs_info *info); | 219 | int btrfs_transaction_in_commit(struct btrfs_fs_info *info); |
217 | void btrfs_put_transaction(struct btrfs_transaction *transaction); | 220 | void btrfs_put_transaction(struct btrfs_transaction *transaction); |
218 | void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info); | 221 | void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info); |
219 | 222 | void btrfs_add_dropped_root(struct btrfs_trans_handle *trans, | |
223 | struct btrfs_root *root); | ||
220 | #endif | 224 | #endif |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 2ca784a14e84..595279a8b99f 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -376,6 +376,14 @@ struct map_lookup { | |||
376 | #define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4) | 376 | #define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4) |
377 | #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) | 377 | #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) |
378 | 378 | ||
379 | #define BTRFS_BALANCE_ARGS_MASK \ | ||
380 | (BTRFS_BALANCE_ARGS_PROFILES | \ | ||
381 | BTRFS_BALANCE_ARGS_USAGE | \ | ||
382 | BTRFS_BALANCE_ARGS_DEVID | \ | ||
383 | BTRFS_BALANCE_ARGS_DRANGE | \ | ||
384 | BTRFS_BALANCE_ARGS_VRANGE | \ | ||
385 | BTRFS_BALANCE_ARGS_LIMIT) | ||
386 | |||
379 | /* | 387 | /* |
380 | * Profile changing flags. When SOFT is set we won't relocate chunk if | 388 | * Profile changing flags. When SOFT is set we won't relocate chunk if |
381 | * it already has the target profile (even though it may be | 389 | * it already has the target profile (even though it may be |
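BTRFS_BALANCE_ARGS_MASK above, combined with the check added earlier in fs/btrfs/ioctl.c, makes the balance ioctl reject any flags outside the known argument and type bits with -EINVAL instead of silently ignoring them. A tiny userspace sketch of this whitelist-style flag validation; the bit values here are placeholders, not the real btrfs constants:

#include <stdint.h>
#include <stdio.h>

#define ARG_PROFILES (1ULL << 0)
#define ARG_USAGE    (1ULL << 1)
#define ARG_DEVID    (1ULL << 2)
#define ARG_LIMIT    (1ULL << 5)
#define ARGS_MASK    (ARG_PROFILES | ARG_USAGE | ARG_DEVID | ARG_LIMIT)
#define TYPE_MASK    (7ULL << 8)       /* data/metadata/system selectors */

/* Reject any request carrying bits we do not understand. */
static int validate_flags(uint64_t flags)
{
    if (flags & ~(ARGS_MASK | TYPE_MASK))
        return -1;                     /* would be -EINVAL in the kernel */
    return 0;
}

int main(void)
{
    printf("known flags:   %d\n", validate_flags(ARG_USAGE | (1ULL << 8)));
    printf("unknown flags: %d\n", validate_flags(1ULL << 40));
    return 0;
}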
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index aa0dc2573374..afa09fce8151 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -444,6 +444,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) | |||
444 | return 0; | 444 | return 0; |
445 | } | 445 | } |
446 | 446 | ||
447 | /* Server has provided av pairs/target info in the type 2 challenge | ||
448 | * packet and we have plucked it and stored it within the smb session. | ||
449 | * We parse that blob here to find the server-given timestamp | ||
450 | * as part of ntlmv2 authentication (or local current time as | ||
451 | * default in case of failure) | ||
452 | */ | ||
453 | static __le64 | ||
454 | find_timestamp(struct cifs_ses *ses) | ||
455 | { | ||
456 | unsigned int attrsize; | ||
457 | unsigned int type; | ||
458 | unsigned int onesize = sizeof(struct ntlmssp2_name); | ||
459 | unsigned char *blobptr; | ||
460 | unsigned char *blobend; | ||
461 | struct ntlmssp2_name *attrptr; | ||
462 | |||
463 | if (!ses->auth_key.len || !ses->auth_key.response) | ||
464 | return 0; | ||
465 | |||
466 | blobptr = ses->auth_key.response; | ||
467 | blobend = blobptr + ses->auth_key.len; | ||
468 | |||
469 | while (blobptr + onesize < blobend) { | ||
470 | attrptr = (struct ntlmssp2_name *) blobptr; | ||
471 | type = le16_to_cpu(attrptr->type); | ||
472 | if (type == NTLMSSP_AV_EOL) | ||
473 | break; | ||
474 | blobptr += 2; /* advance attr type */ | ||
475 | attrsize = le16_to_cpu(attrptr->length); | ||
476 | blobptr += 2; /* advance attr size */ | ||
477 | if (blobptr + attrsize > blobend) | ||
478 | break; | ||
479 | if (type == NTLMSSP_AV_TIMESTAMP) { | ||
480 | if (attrsize == sizeof(u64)) | ||
481 | return *((__le64 *)blobptr); | ||
482 | } | ||
483 | blobptr += attrsize; /* advance attr value */ | ||
484 | } | ||
485 | |||
486 | return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); | ||
487 | } | ||
488 | |||
447 | static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, | 489 | static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, |
448 | const struct nls_table *nls_cp) | 490 | const struct nls_table *nls_cp) |
449 | { | 491 | { |
@@ -641,6 +683,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) | |||
641 | struct ntlmv2_resp *ntlmv2; | 683 | struct ntlmv2_resp *ntlmv2; |
642 | char ntlmv2_hash[16]; | 684 | char ntlmv2_hash[16]; |
643 | unsigned char *tiblob = NULL; /* target info blob */ | 685 | unsigned char *tiblob = NULL; /* target info blob */ |
686 | __le64 rsp_timestamp; | ||
644 | 687 | ||
645 | if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) { | 688 | if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) { |
646 | if (!ses->domainName) { | 689 | if (!ses->domainName) { |
@@ -659,6 +702,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) | |||
659 | } | 702 | } |
660 | } | 703 | } |
661 | 704 | ||
705 | /* Must be within 5 minutes of the server (or in range +/-2h | ||
706 | * in case of Mac OS X), so simply carry over server timestamp | ||
707 | * (as Windows 7 does) | ||
708 | */ | ||
709 | rsp_timestamp = find_timestamp(ses); | ||
710 | |||
662 | baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp); | 711 | baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp); |
663 | tilen = ses->auth_key.len; | 712 | tilen = ses->auth_key.len; |
664 | tiblob = ses->auth_key.response; | 713 | tiblob = ses->auth_key.response; |
@@ -675,8 +724,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) | |||
675 | (ses->auth_key.response + CIFS_SESS_KEY_SIZE); | 724 | (ses->auth_key.response + CIFS_SESS_KEY_SIZE); |
676 | ntlmv2->blob_signature = cpu_to_le32(0x00000101); | 725 | ntlmv2->blob_signature = cpu_to_le32(0x00000101); |
677 | ntlmv2->reserved = 0; | 726 | ntlmv2->reserved = 0; |
678 | /* Must be within 5 minutes of the server */ | 727 | ntlmv2->time = rsp_timestamp; |
679 | ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); | 728 | |
680 | get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal)); | 729 | get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal)); |
681 | ntlmv2->reserved2 = 0; | 730 | ntlmv2->reserved2 = 0; |
682 | 731 | ||
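find_timestamp() above walks the NTLMSSP target-info blob as a sequence of type/length/value attribute pairs, stopping at NTLMSSP_AV_EOL and falling back to the local time if no 8-byte timestamp attribute is present; setup_ntlmv2_rsp() then carries that timestamp into the response instead of always using the local clock. A compact userspace sketch of the same TLV walk over a fabricated little-endian blob (the constants and layout are abbreviated for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AV_EOL        0
#define AV_TIMESTAMP  7

/* Walk a blob of { u16 type; u16 length; u8 value[length]; } records and
 * return the 8-byte timestamp if one is present, else 0. Assumes a
 * little-endian host for brevity; the kernel uses le16_to_cpu()/__le64. */
static uint64_t find_timestamp(const uint8_t *blob, size_t len)
{
    size_t off = 0;

    while (off + 4 <= len) {
        uint16_t type, avlen;

        memcpy(&type, blob + off, 2);
        memcpy(&avlen, blob + off + 2, 2);
        off += 4;
        if (type == AV_EOL || off + avlen > len)
            break;
        if (type == AV_TIMESTAMP && avlen == 8) {
            uint64_t ts;

            memcpy(&ts, blob + off, 8);
            return ts;
        }
        off += avlen;                  /* advance past this attribute value */
    }
    return 0;                          /* caller falls back to the local clock */
}

int main(void)
{
    /* one name attribute, one timestamp attribute, then EOL */
    uint8_t blob[] = {
        1, 0, 2, 0, 'S', 'R',
        7, 0, 8, 0, 0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe,
        0, 0, 0, 0,
    };

    printf("timestamp: 0x%llx\n",
           (unsigned long long)find_timestamp(blob, sizeof(blob)));
    return 0;
}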
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 6a1119e87fbb..e739950ca084 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -325,8 +325,11 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) | |||
325 | static void | 325 | static void |
326 | cifs_show_security(struct seq_file *s, struct cifs_ses *ses) | 326 | cifs_show_security(struct seq_file *s, struct cifs_ses *ses) |
327 | { | 327 | { |
328 | if (ses->sectype == Unspecified) | 328 | if (ses->sectype == Unspecified) { |
329 | if (ses->user_name == NULL) | ||
330 | seq_puts(s, ",sec=none"); | ||
329 | return; | 331 | return; |
332 | } | ||
330 | 333 | ||
331 | seq_puts(s, ",sec="); | 334 | seq_puts(s, ",sec="); |
332 | 335 | ||
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 27aea110e923..c3cc1609025f 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
136 | extern const struct export_operations cifs_export_ops; | 136 | extern const struct export_operations cifs_export_ops; |
137 | #endif /* CONFIG_CIFS_NFSD_EXPORT */ | 137 | #endif /* CONFIG_CIFS_NFSD_EXPORT */ |
138 | 138 | ||
139 | #define CIFS_VERSION "2.07" | 139 | #define CIFS_VERSION "2.08" |
140 | #endif /* _CIFSFS_H */ | 140 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index e2a6af1508af..62203c387db4 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -3380,6 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, | |||
3380 | struct page *page, *tpage; | 3380 | struct page *page, *tpage; |
3381 | unsigned int expected_index; | 3381 | unsigned int expected_index; |
3382 | int rc; | 3382 | int rc; |
3383 | gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping); | ||
3383 | 3384 | ||
3384 | INIT_LIST_HEAD(tmplist); | 3385 | INIT_LIST_HEAD(tmplist); |
3385 | 3386 | ||
@@ -3392,7 +3393,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, | |||
3392 | */ | 3393 | */ |
3393 | __set_page_locked(page); | 3394 | __set_page_locked(page); |
3394 | rc = add_to_page_cache_locked(page, mapping, | 3395 | rc = add_to_page_cache_locked(page, mapping, |
3395 | page->index, GFP_KERNEL); | 3396 | page->index, gfp); |
3396 | 3397 | ||
3397 | /* give up if we can't stick it in the cache */ | 3398 | /* give up if we can't stick it in the cache */ |
3398 | if (rc) { | 3399 | if (rc) { |
@@ -3418,8 +3419,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, | |||
3418 | break; | 3419 | break; |
3419 | 3420 | ||
3420 | __set_page_locked(page); | 3421 | __set_page_locked(page); |
3421 | if (add_to_page_cache_locked(page, mapping, page->index, | 3422 | if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { |
3422 | GFP_KERNEL)) { | ||
3423 | __clear_page_locked(page); | 3423 | __clear_page_locked(page); |
3424 | break; | 3424 | break; |
3425 | } | 3425 | } |
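The readpages change above computes the allocation flags once as GFP_KERNEL & mapping_gfp_mask(mapping) and uses them for both add_to_page_cache_locked() calls, so page cache allocations on this path respect whatever restrictions the address space has recorded in its gfp mask. A trivial sketch of combining default allocation flags with a per-mapping mask; the flag values are invented:

#include <stdio.h>

#define GFP_WAIT  0x01u
#define GFP_IO    0x02u
#define GFP_FS    0x04u
#define GFP_KERN  (GFP_WAIT | GFP_IO | GFP_FS)

struct demo_mapping {
    unsigned int gfp_mask;             /* what this mapping allows */
};

/* Start from the defaults, then drop anything the mapping forbids. */
static unsigned int readahead_gfp(const struct demo_mapping *m)
{
    return GFP_KERN & m->gfp_mask;
}

int main(void)
{
    struct demo_mapping no_fs = { .gfp_mask = GFP_WAIT | GFP_IO };

    printf("effective flags: 0x%x\n", readahead_gfp(&no_fs));   /* FS bit dropped */
    return 0;
}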
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index f621b44cb800..6b66dd5d1540 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, | |||
2034 | struct tcon_link *tlink = NULL; | 2034 | struct tcon_link *tlink = NULL; |
2035 | struct cifs_tcon *tcon = NULL; | 2035 | struct cifs_tcon *tcon = NULL; |
2036 | struct TCP_Server_Info *server; | 2036 | struct TCP_Server_Info *server; |
2037 | struct cifs_io_parms io_parms; | ||
2038 | 2037 | ||
2039 | /* | 2038 | /* |
2040 | * To avoid spurious oplock breaks from server, in the case of | 2039 | * To avoid spurious oplock breaks from server, in the case of |
@@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, | |||
2056 | rc = -ENOSYS; | 2055 | rc = -ENOSYS; |
2057 | cifsFileInfo_put(open_file); | 2056 | cifsFileInfo_put(open_file); |
2058 | cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc); | 2057 | cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc); |
2059 | if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { | ||
2060 | unsigned int bytes_written; | ||
2061 | |||
2062 | io_parms.netfid = open_file->fid.netfid; | ||
2063 | io_parms.pid = open_file->pid; | ||
2064 | io_parms.tcon = tcon; | ||
2065 | io_parms.offset = 0; | ||
2066 | io_parms.length = attrs->ia_size; | ||
2067 | rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, | ||
2068 | NULL, NULL, 1); | ||
2069 | cifs_dbg(FYI, "Wrt seteof rc %d\n", rc); | ||
2070 | } | ||
2071 | } else | 2058 | } else |
2072 | rc = -EINVAL; | 2059 | rc = -EINVAL; |
2073 | 2060 | ||
@@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, | |||
2093 | else | 2080 | else |
2094 | rc = -ENOSYS; | 2081 | rc = -ENOSYS; |
2095 | cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc); | 2082 | cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc); |
2096 | if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { | ||
2097 | __u16 netfid; | ||
2098 | int oplock = 0; | ||
2099 | 2083 | ||
2100 | rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN, | ||
2101 | GENERIC_WRITE, CREATE_NOT_DIR, &netfid, | ||
2102 | &oplock, NULL, cifs_sb->local_nls, | ||
2103 | cifs_remap(cifs_sb)); | ||
2104 | if (rc == 0) { | ||
2105 | unsigned int bytes_written; | ||
2106 | |||
2107 | io_parms.netfid = netfid; | ||
2108 | io_parms.pid = current->tgid; | ||
2109 | io_parms.tcon = tcon; | ||
2110 | io_parms.offset = 0; | ||
2111 | io_parms.length = attrs->ia_size; | ||
2112 | rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL, | ||
2113 | NULL, 1); | ||
2114 | cifs_dbg(FYI, "wrt seteof rc %d\n", rc); | ||
2115 | CIFSSMBClose(xid, tcon, netfid); | ||
2116 | } | ||
2117 | } | ||
2118 | if (tlink) | 2084 | if (tlink) |
2119 | cifs_put_tlink(tlink); | 2085 | cifs_put_tlink(tlink); |
2120 | 2086 | ||
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index c63f5227b681..28a77bf1d559 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c | |||
@@ -67,6 +67,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, | |||
67 | goto out_drop_write; | 67 | goto out_drop_write; |
68 | } | 68 | } |
69 | 69 | ||
70 | if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) { | ||
71 | rc = -EBADF; | ||
72 | cifs_dbg(VFS, "src file seems to be from a different filesystem type\n"); | ||
73 | goto out_fput; | ||
74 | } | ||
75 | |||
70 | if ((!src_file.file->private_data) || (!dst_file->private_data)) { | 76 | if ((!src_file.file->private_data) || (!dst_file->private_data)) { |
71 | rc = -EBADF; | 77 | rc = -EBADF; |
72 | cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); | 78 | cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index df91bcf56d67..18da19f4f811 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -50,9 +50,13 @@ change_conf(struct TCP_Server_Info *server) | |||
50 | break; | 50 | break; |
51 | default: | 51 | default: |
52 | server->echoes = true; | 52 | server->echoes = true; |
53 | server->oplocks = true; | 53 | if (enable_oplocks) { |
54 | server->oplocks = true; | ||
55 | server->oplock_credits = 1; | ||
56 | } else | ||
57 | server->oplocks = false; | ||
58 | |||
54 | server->echo_credits = 1; | 59 | server->echo_credits = 1; |
55 | server->oplock_credits = 1; | ||
56 | } | 60 | } |
57 | server->credits -= server->echo_credits + server->oplock_credits; | 61 | server->credits -= server->echo_credits + server->oplock_credits; |
58 | return 0; | 62 | return 0; |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 070fb2ad85ce..597a417ba94d 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include "smb2status.h" | 46 | #include "smb2status.h" |
47 | #include "smb2glob.h" | 47 | #include "smb2glob.h" |
48 | #include "cifspdu.h" | 48 | #include "cifspdu.h" |
49 | #include "cifs_spnego.h" | ||
49 | 50 | ||
50 | /* | 51 | /* |
51 | * The following table defines the expected "StructureSize" of SMB2 requests | 52 | * The following table defines the expected "StructureSize" of SMB2 requests |
@@ -486,19 +487,15 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
486 | cifs_dbg(FYI, "missing security blob on negprot\n"); | 487 | cifs_dbg(FYI, "missing security blob on negprot\n"); |
487 | 488 | ||
488 | rc = cifs_enable_signing(server, ses->sign); | 489 | rc = cifs_enable_signing(server, ses->sign); |
489 | #ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */ | ||
490 | if (rc) | 490 | if (rc) |
491 | goto neg_exit; | 491 | goto neg_exit; |
492 | if (blob_length) | 492 | if (blob_length) { |
493 | rc = decode_negTokenInit(security_blob, blob_length, server); | 493 | rc = decode_negTokenInit(security_blob, blob_length, server); |
494 | if (rc == 1) | 494 | if (rc == 1) |
495 | rc = 0; | 495 | rc = 0; |
496 | else if (rc == 0) { | 496 | else if (rc == 0) |
497 | rc = -EIO; | 497 | rc = -EIO; |
498 | goto neg_exit; | ||
499 | } | 498 | } |
500 | #endif | ||
501 | |||
502 | neg_exit: | 499 | neg_exit: |
503 | free_rsp_buf(resp_buftype, rsp); | 500 | free_rsp_buf(resp_buftype, rsp); |
504 | return rc; | 501 | return rc; |
@@ -592,7 +589,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, | |||
592 | __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ | 589 | __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ |
593 | struct TCP_Server_Info *server = ses->server; | 590 | struct TCP_Server_Info *server = ses->server; |
594 | u16 blob_length = 0; | 591 | u16 blob_length = 0; |
595 | char *security_blob; | 592 | struct key *spnego_key = NULL; |
593 | char *security_blob = NULL; | ||
596 | char *ntlmssp_blob = NULL; | 594 | char *ntlmssp_blob = NULL; |
597 | bool use_spnego = false; /* else use raw ntlmssp */ | 595 | bool use_spnego = false; /* else use raw ntlmssp */ |
598 | 596 | ||
@@ -620,7 +618,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, | |||
620 | ses->ntlmssp->sesskey_per_smbsess = true; | 618 | ses->ntlmssp->sesskey_per_smbsess = true; |
621 | 619 | ||
622 | /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */ | 620 | /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */ |
623 | ses->sectype = RawNTLMSSP; | 621 | if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP) |
622 | ses->sectype = RawNTLMSSP; | ||
624 | 623 | ||
625 | ssetup_ntlmssp_authenticate: | 624 | ssetup_ntlmssp_authenticate: |
626 | if (phase == NtLmChallenge) | 625 | if (phase == NtLmChallenge) |
@@ -649,7 +648,48 @@ ssetup_ntlmssp_authenticate: | |||
649 | iov[0].iov_base = (char *)req; | 648 | iov[0].iov_base = (char *)req; |
650 | /* 4 for rfc1002 length field and 1 for pad */ | 649 | /* 4 for rfc1002 length field and 1 for pad */ |
651 | iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; | 650 | iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; |
652 | if (phase == NtLmNegotiate) { | 651 | |
652 | if (ses->sectype == Kerberos) { | ||
653 | #ifdef CONFIG_CIFS_UPCALL | ||
654 | struct cifs_spnego_msg *msg; | ||
655 | |||
656 | spnego_key = cifs_get_spnego_key(ses); | ||
657 | if (IS_ERR(spnego_key)) { | ||
658 | rc = PTR_ERR(spnego_key); | ||
659 | spnego_key = NULL; | ||
660 | goto ssetup_exit; | ||
661 | } | ||
662 | |||
663 | msg = spnego_key->payload.data; | ||
664 | /* | ||
665 | * check version field to make sure that cifs.upcall is | ||
666 | * sending us a response in an expected form | ||
667 | */ | ||
668 | if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { | ||
669 | cifs_dbg(VFS, | ||
670 | "bad cifs.upcall version. Expected %d got %d", | ||
671 | CIFS_SPNEGO_UPCALL_VERSION, msg->version); | ||
672 | rc = -EKEYREJECTED; | ||
673 | goto ssetup_exit; | ||
674 | } | ||
675 | ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, | ||
676 | GFP_KERNEL); | ||
677 | if (!ses->auth_key.response) { | ||
678 | cifs_dbg(VFS, | ||
679 | "Kerberos can't allocate (%u bytes) memory", | ||
680 | msg->sesskey_len); | ||
681 | rc = -ENOMEM; | ||
682 | goto ssetup_exit; | ||
683 | } | ||
684 | ses->auth_key.len = msg->sesskey_len; | ||
685 | blob_length = msg->secblob_len; | ||
686 | iov[1].iov_base = msg->data + msg->sesskey_len; | ||
687 | iov[1].iov_len = blob_length; | ||
688 | #else | ||
689 | rc = -EOPNOTSUPP; | ||
690 | goto ssetup_exit; | ||
691 | #endif /* CONFIG_CIFS_UPCALL */ | ||
692 | } else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */ | ||
653 | ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE), | 693 | ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE), |
654 | GFP_KERNEL); | 694 | GFP_KERNEL); |
655 | if (ntlmssp_blob == NULL) { | 695 | if (ntlmssp_blob == NULL) { |
@@ -672,6 +712,8 @@ ssetup_ntlmssp_authenticate: | |||
672 | /* with raw NTLMSSP we don't encapsulate in SPNEGO */ | 712 | /* with raw NTLMSSP we don't encapsulate in SPNEGO */ |
673 | security_blob = ntlmssp_blob; | 713 | security_blob = ntlmssp_blob; |
674 | } | 714 | } |
715 | iov[1].iov_base = security_blob; | ||
716 | iov[1].iov_len = blob_length; | ||
675 | } else if (phase == NtLmAuthenticate) { | 717 | } else if (phase == NtLmAuthenticate) { |
676 | req->hdr.SessionId = ses->Suid; | 718 | req->hdr.SessionId = ses->Suid; |
677 | ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500, | 719 | ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500, |
@@ -699,6 +741,8 @@ ssetup_ntlmssp_authenticate: | |||
699 | } else { | 741 | } else { |
700 | security_blob = ntlmssp_blob; | 742 | security_blob = ntlmssp_blob; |
701 | } | 743 | } |
744 | iov[1].iov_base = security_blob; | ||
745 | iov[1].iov_len = blob_length; | ||
702 | } else { | 746 | } else { |
703 | cifs_dbg(VFS, "illegal ntlmssp phase\n"); | 747 | cifs_dbg(VFS, "illegal ntlmssp phase\n"); |
704 | rc = -EIO; | 748 | rc = -EIO; |
@@ -710,8 +754,6 @@ ssetup_ntlmssp_authenticate: | |||
710 | cpu_to_le16(sizeof(struct smb2_sess_setup_req) - | 754 | cpu_to_le16(sizeof(struct smb2_sess_setup_req) - |
711 | 1 /* pad */ - 4 /* rfc1001 len */); | 755 | 1 /* pad */ - 4 /* rfc1001 len */); |
712 | req->SecurityBufferLength = cpu_to_le16(blob_length); | 756 | req->SecurityBufferLength = cpu_to_le16(blob_length); |
713 | iov[1].iov_base = security_blob; | ||
714 | iov[1].iov_len = blob_length; | ||
715 | 757 | ||
716 | inc_rfc1001_len(req, blob_length - 1 /* pad */); | 758 | inc_rfc1001_len(req, blob_length - 1 /* pad */); |
717 | 759 | ||
@@ -722,6 +764,7 @@ ssetup_ntlmssp_authenticate: | |||
722 | 764 | ||
723 | kfree(security_blob); | 765 | kfree(security_blob); |
724 | rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base; | 766 | rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base; |
767 | ses->Suid = rsp->hdr.SessionId; | ||
725 | if (resp_buftype != CIFS_NO_BUFFER && | 768 | if (resp_buftype != CIFS_NO_BUFFER && |
726 | rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) { | 769 | rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) { |
727 | if (phase != NtLmNegotiate) { | 770 | if (phase != NtLmNegotiate) { |
@@ -739,7 +782,6 @@ ssetup_ntlmssp_authenticate: | |||
739 | /* NTLMSSP Negotiate sent now processing challenge (response) */ | 782 | /* NTLMSSP Negotiate sent now processing challenge (response) */ |
740 | phase = NtLmChallenge; /* process ntlmssp challenge */ | 783 | phase = NtLmChallenge; /* process ntlmssp challenge */ |
741 | rc = 0; /* MORE_PROCESSING is not an error here but expected */ | 784 | rc = 0; /* MORE_PROCESSING is not an error here but expected */ |
742 | ses->Suid = rsp->hdr.SessionId; | ||
743 | rc = decode_ntlmssp_challenge(rsp->Buffer, | 785 | rc = decode_ntlmssp_challenge(rsp->Buffer, |
744 | le16_to_cpu(rsp->SecurityBufferLength), ses); | 786 | le16_to_cpu(rsp->SecurityBufferLength), ses); |
745 | } | 787 | } |
@@ -796,6 +838,10 @@ keygen_exit: | |||
796 | kfree(ses->auth_key.response); | 838 | kfree(ses->auth_key.response); |
797 | ses->auth_key.response = NULL; | 839 | ses->auth_key.response = NULL; |
798 | } | 840 | } |
841 | if (spnego_key) { | ||
842 | key_invalidate(spnego_key); | ||
843 | key_put(spnego_key); | ||
844 | } | ||
799 | kfree(ses->ntlmssp); | 845 | kfree(ses->ntlmssp); |
800 | 846 | ||
801 | return rc; | 847 | return rc; |
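The session setup changes above let SMB2 consume a Kerberos blob obtained from the cifs.upcall helper: the SPNEGO key payload's version field is validated before anything else is trusted, the session key is copied out with kmemdup(), the second iovec is pointed at the security blob, and the SessionId is now recorded from every session setup response. A stripped-down userspace sketch of the "check the helper's version, then take a private copy of the key" step; the payload layout and version constant are invented stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_UPCALL_VERSION 2          /* stand-in for the real upcall version */

struct demo_upcall_msg {
    uint32_t version;
    uint32_t sesskey_len;
    uint32_t secblob_len;
    uint8_t  data[];                   /* session key, then security blob */
};

/* Return a private copy of the session key, or NULL if the helper spoke a
 * protocol version we do not understand (-EKEYREJECTED in the kernel). */
static uint8_t *take_session_key(const struct demo_upcall_msg *msg, uint32_t *len)
{
    uint8_t *key;

    if (msg->version != DEMO_UPCALL_VERSION)
        return NULL;
    key = malloc(msg->sesskey_len);
    if (!key)
        return NULL;
    memcpy(key, msg->data, msg->sesskey_len);   /* kmemdup() equivalent */
    *len = msg->sesskey_len;
    return key;
}

int main(void)
{
    struct demo_upcall_msg *msg = calloc(1, sizeof(*msg) + 6);
    uint32_t klen = 0;
    uint8_t *key;

    msg->version = DEMO_UPCALL_VERSION;
    msg->sesskey_len = 4;
    msg->secblob_len = 2;
    memcpy(msg->data, "k3y!sb", 6);
    key = take_session_key(msg, &klen);
    printf("session key accepted: %s (%u bytes)\n", key ? "yes" : "no", klen);
    free(key);
    free(msg);
    return 0;
}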
@@ -876,6 +922,12 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
876 | if (tcon && tcon->bad_network_name) | 922 | if (tcon && tcon->bad_network_name) |
877 | return -ENOENT; | 923 | return -ENOENT; |
878 | 924 | ||
925 | if ((tcon && tcon->seal) && | ||
926 | ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) { | ||
927 | cifs_dbg(VFS, "encryption requested but no server support"); | ||
928 | return -EOPNOTSUPP; | ||
929 | } | ||
930 | |||
879 | unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); | 931 | unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); |
880 | if (unc_path == NULL) | 932 | if (unc_path == NULL) |
881 | return -ENOMEM; | 933 | return -ENOMEM; |
@@ -955,6 +1007,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
955 | ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) | 1007 | ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) |
956 | cifs_dbg(VFS, "DFS capability contradicts DFS flag\n"); | 1008 | cifs_dbg(VFS, "DFS capability contradicts DFS flag\n"); |
957 | init_copy_chunk_defaults(tcon); | 1009 | init_copy_chunk_defaults(tcon); |
1010 | if (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA) | ||
1011 | cifs_dbg(VFS, "Encrypted shares not supported\n"); | ||
958 | if (tcon->ses->server->ops->validate_negotiate) | 1012 | if (tcon->ses->server->ops->validate_negotiate) |
959 | rc = tcon->ses->server->ops->validate_negotiate(xid, tcon); | 1013 | rc = tcon->ses->server->ops->validate_negotiate(xid, tcon); |
960 | tcon_exit: | 1014 | tcon_exit: |
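
The two SMB2_tcon() hunks above make encrypted ("sealed") shares fail cleanly: the tree connect is refused outright when the server never negotiated SMB2_GLOBAL_CAP_ENCRYPTION, and a share reporting SHI1005_FLAGS_ENCRYPT_DATA is logged as unsupported. A minimal sketch of the first guard, using a hypothetical helper name rather than the in-tree code:

    /* sketch only: refuse a sealed mount the server cannot encrypt */
    static int smb2_check_seal_support(struct cifs_tcon *tcon,
    				   struct cifs_ses *ses)
    {
    	if (tcon->seal &&
    	    !(ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
    		return -EOPNOTSUPP;	/* fail the tree connect early */
    	return 0;
    }
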
@@ -119,7 +119,8 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, | |||
119 | size_t len; | 119 | size_t len; |
120 | if (pos == max) { | 120 | if (pos == max) { |
121 | unsigned blkbits = inode->i_blkbits; | 121 | unsigned blkbits = inode->i_blkbits; |
122 | sector_t block = pos >> blkbits; | 122 | long page = pos >> PAGE_SHIFT; |
123 | sector_t block = page << (PAGE_SHIFT - blkbits); | ||
123 | unsigned first = pos - (block << blkbits); | 124 | unsigned first = pos - (block << blkbits); |
124 | long size; | 125 | long size; |
125 | 126 | ||
@@ -284,6 +285,7 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh, | |||
284 | static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, | 285 | static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, |
285 | struct vm_area_struct *vma, struct vm_fault *vmf) | 286 | struct vm_area_struct *vma, struct vm_fault *vmf) |
286 | { | 287 | { |
288 | struct address_space *mapping = inode->i_mapping; | ||
287 | sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9); | 289 | sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9); |
288 | unsigned long vaddr = (unsigned long)vmf->virtual_address; | 290 | unsigned long vaddr = (unsigned long)vmf->virtual_address; |
289 | void __pmem *addr; | 291 | void __pmem *addr; |
@@ -291,6 +293,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, | |||
291 | pgoff_t size; | 293 | pgoff_t size; |
292 | int error; | 294 | int error; |
293 | 295 | ||
296 | i_mmap_lock_read(mapping); | ||
297 | |||
294 | /* | 298 | /* |
295 | * Check truncate didn't happen while we were allocating a block. | 299 | * Check truncate didn't happen while we were allocating a block. |
296 | * If it did, this block may or may not be still allocated to the | 300 | * If it did, this block may or may not be still allocated to the |
@@ -320,6 +324,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, | |||
320 | error = vm_insert_mixed(vma, vaddr, pfn); | 324 | error = vm_insert_mixed(vma, vaddr, pfn); |
321 | 325 | ||
322 | out: | 326 | out: |
327 | i_mmap_unlock_read(mapping); | ||
328 | |||
323 | return error; | 329 | return error; |
324 | } | 330 | } |
325 | 331 | ||
@@ -381,17 +387,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
381 | * from a read fault and we've raced with a truncate | 387 | * from a read fault and we've raced with a truncate |
382 | */ | 388 | */ |
383 | error = -EIO; | 389 | error = -EIO; |
384 | goto unlock; | 390 | goto unlock_page; |
385 | } | 391 | } |
386 | } else { | ||
387 | i_mmap_lock_write(mapping); | ||
388 | } | 392 | } |
389 | 393 | ||
390 | error = get_block(inode, block, &bh, 0); | 394 | error = get_block(inode, block, &bh, 0); |
391 | if (!error && (bh.b_size < PAGE_SIZE)) | 395 | if (!error && (bh.b_size < PAGE_SIZE)) |
392 | error = -EIO; /* fs corruption? */ | 396 | error = -EIO; /* fs corruption? */ |
393 | if (error) | 397 | if (error) |
394 | goto unlock; | 398 | goto unlock_page; |
395 | 399 | ||
396 | if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) { | 400 | if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) { |
397 | if (vmf->flags & FAULT_FLAG_WRITE) { | 401 | if (vmf->flags & FAULT_FLAG_WRITE) { |
@@ -402,9 +406,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
402 | if (!error && (bh.b_size < PAGE_SIZE)) | 406 | if (!error && (bh.b_size < PAGE_SIZE)) |
403 | error = -EIO; | 407 | error = -EIO; |
404 | if (error) | 408 | if (error) |
405 | goto unlock; | 409 | goto unlock_page; |
406 | } else { | 410 | } else { |
407 | i_mmap_unlock_write(mapping); | ||
408 | return dax_load_hole(mapping, page, vmf); | 411 | return dax_load_hole(mapping, page, vmf); |
409 | } | 412 | } |
410 | } | 413 | } |
@@ -416,15 +419,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
416 | else | 419 | else |
417 | clear_user_highpage(new_page, vaddr); | 420 | clear_user_highpage(new_page, vaddr); |
418 | if (error) | 421 | if (error) |
419 | goto unlock; | 422 | goto unlock_page; |
420 | vmf->page = page; | 423 | vmf->page = page; |
421 | if (!page) { | 424 | if (!page) { |
425 | i_mmap_lock_read(mapping); | ||
422 | /* Check we didn't race with truncate */ | 426 | /* Check we didn't race with truncate */ |
423 | size = (i_size_read(inode) + PAGE_SIZE - 1) >> | 427 | size = (i_size_read(inode) + PAGE_SIZE - 1) >> |
424 | PAGE_SHIFT; | 428 | PAGE_SHIFT; |
425 | if (vmf->pgoff >= size) { | 429 | if (vmf->pgoff >= size) { |
430 | i_mmap_unlock_read(mapping); | ||
426 | error = -EIO; | 431 | error = -EIO; |
427 | goto unlock; | 432 | goto out; |
428 | } | 433 | } |
429 | } | 434 | } |
430 | return VM_FAULT_LOCKED; | 435 | return VM_FAULT_LOCKED; |
@@ -460,8 +465,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
460 | WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE)); | 465 | WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE)); |
461 | } | 466 | } |
462 | 467 | ||
463 | if (!page) | ||
464 | i_mmap_unlock_write(mapping); | ||
465 | out: | 468 | out: |
466 | if (error == -ENOMEM) | 469 | if (error == -ENOMEM) |
467 | return VM_FAULT_OOM | major; | 470 | return VM_FAULT_OOM | major; |
@@ -470,14 +473,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
470 | return VM_FAULT_SIGBUS | major; | 473 | return VM_FAULT_SIGBUS | major; |
471 | return VM_FAULT_NOPAGE | major; | 474 | return VM_FAULT_NOPAGE | major; |
472 | 475 | ||
473 | unlock: | 476 | unlock_page: |
474 | if (page) { | 477 | if (page) { |
475 | unlock_page(page); | 478 | unlock_page(page); |
476 | page_cache_release(page); | 479 | page_cache_release(page); |
477 | } else { | ||
478 | i_mmap_unlock_write(mapping); | ||
479 | } | 480 | } |
480 | |||
481 | goto out; | 481 | goto out; |
482 | } | 482 | } |
483 | EXPORT_SYMBOL(__dax_fault); | 483 | EXPORT_SYMBOL(__dax_fault); |
@@ -555,10 +555,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
555 | block = (sector_t)pgoff << (PAGE_SHIFT - blkbits); | 555 | block = (sector_t)pgoff << (PAGE_SHIFT - blkbits); |
556 | 556 | ||
557 | bh.b_size = PMD_SIZE; | 557 | bh.b_size = PMD_SIZE; |
558 | i_mmap_lock_write(mapping); | ||
559 | length = get_block(inode, block, &bh, write); | 558 | length = get_block(inode, block, &bh, write); |
560 | if (length) | 559 | if (length) |
561 | return VM_FAULT_SIGBUS; | 560 | return VM_FAULT_SIGBUS; |
561 | i_mmap_lock_read(mapping); | ||
562 | 562 | ||
563 | /* | 563 | /* |
564 | * If the filesystem isn't willing to tell us the length of a hole, | 564 | * If the filesystem isn't willing to tell us the length of a hole, |
@@ -568,24 +568,14 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
568 | if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) | 568 | if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) |
569 | goto fallback; | 569 | goto fallback; |
570 | 570 | ||
571 | if (buffer_unwritten(&bh) || buffer_new(&bh)) { | ||
572 | int i; | ||
573 | for (i = 0; i < PTRS_PER_PMD; i++) | ||
574 | clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); | ||
575 | wmb_pmem(); | ||
576 | count_vm_event(PGMAJFAULT); | ||
577 | mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); | ||
578 | result |= VM_FAULT_MAJOR; | ||
579 | } | ||
580 | |||
581 | /* | 571 | /* |
582 | * If we allocated new storage, make sure no process has any | 572 | * If we allocated new storage, make sure no process has any |
583 | * zero pages covering this hole | 573 | * zero pages covering this hole |
584 | */ | 574 | */ |
585 | if (buffer_new(&bh)) { | 575 | if (buffer_new(&bh)) { |
586 | i_mmap_unlock_write(mapping); | 576 | i_mmap_unlock_read(mapping); |
587 | unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0); | 577 | unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0); |
588 | i_mmap_lock_write(mapping); | 578 | i_mmap_lock_read(mapping); |
589 | } | 579 | } |
590 | 580 | ||
591 | /* | 581 | /* |
@@ -632,15 +622,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
632 | if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR)) | 622 | if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR)) |
633 | goto fallback; | 623 | goto fallback; |
634 | 624 | ||
625 | if (buffer_unwritten(&bh) || buffer_new(&bh)) { | ||
626 | int i; | ||
627 | for (i = 0; i < PTRS_PER_PMD; i++) | ||
628 | clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); | ||
629 | wmb_pmem(); | ||
630 | count_vm_event(PGMAJFAULT); | ||
631 | mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); | ||
632 | result |= VM_FAULT_MAJOR; | ||
633 | } | ||
634 | |||
635 | result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write); | 635 | result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write); |
636 | } | 636 | } |
637 | 637 | ||
638 | out: | 638 | out: |
639 | i_mmap_unlock_read(mapping); | ||
640 | |||
639 | if (buffer_unwritten(&bh)) | 641 | if (buffer_unwritten(&bh)) |
640 | complete_unwritten(&bh, !(result & VM_FAULT_ERROR)); | 642 | complete_unwritten(&bh, !(result & VM_FAULT_ERROR)); |
641 | 643 | ||
642 | i_mmap_unlock_write(mapping); | ||
643 | |||
644 | return result; | 644 | return result; |
645 | 645 | ||
646 | fallback: | 646 | fallback: |
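
The fs/dax.c hunks above all serve one change: the exclusive i_mmap lock that used to be held across the whole fault is replaced by a shared (read) lock taken only around the truncate re-check and the page-table insertion. A minimal sketch of the resulting ordering, as a hypothetical helper rather than dax_insert_mapping() itself:

    /* sketch: re-check i_size under the shared i_mmap lock, then insert */
    static int dax_insert_checked(struct address_space *mapping, struct inode *inode,
    			      struct vm_area_struct *vma, unsigned long vaddr,
    			      unsigned long pfn, pgoff_t pgoff)
    {
    	pgoff_t size;
    	int error;
    
    	i_mmap_lock_read(mapping);
    	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
    	if (pgoff >= size) {
    		error = -EIO;	/* truncate raced with the block allocation */
    		goto out;
    	}
    	error = vm_insert_mixed(vma, vaddr, pfn);
    out:
    	i_mmap_unlock_read(mapping);
    	return error;
    }

The PMD path makes the same swap, and the zeroing of freshly allocated blocks is moved below the alignment checks so no major fault is charged to a request that falls back to PTEs.
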
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 47728da7702c..b46e9fc64196 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig | |||
@@ -63,7 +63,7 @@ config EXT4_FS | |||
63 | If unsure, say N. | 63 | If unsure, say N. |
64 | 64 | ||
65 | config EXT4_USE_FOR_EXT2 | 65 | config EXT4_USE_FOR_EXT2 |
66 | bool "Use ext4 for ext2/ext3 file systems" | 66 | bool "Use ext4 for ext2 file systems" |
67 | depends on EXT4_FS | 67 | depends on EXT4_FS |
68 | depends on EXT2_FS=n | 68 | depends on EXT2_FS=n |
69 | default y | 69 | default y |
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index e26803fb210d..560af0437704 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c | |||
@@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping, | |||
165 | if (pages) { | 165 | if (pages) { |
166 | page = list_entry(pages->prev, struct page, lru); | 166 | page = list_entry(pages->prev, struct page, lru); |
167 | list_del(&page->lru); | 167 | list_del(&page->lru); |
168 | if (add_to_page_cache_lru(page, mapping, | 168 | if (add_to_page_cache_lru(page, mapping, page->index, |
169 | page->index, GFP_KERNEL)) | 169 | GFP_KERNEL & mapping_gfp_mask(mapping))) |
170 | goto next_page; | 170 | goto next_page; |
171 | } | 171 | } |
172 | 172 | ||
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 587ac08eabb6..091a36444972 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -1481,6 +1481,21 @@ static long writeback_sb_inodes(struct super_block *sb, | |||
1481 | wbc_detach_inode(&wbc); | 1481 | wbc_detach_inode(&wbc); |
1482 | work->nr_pages -= write_chunk - wbc.nr_to_write; | 1482 | work->nr_pages -= write_chunk - wbc.nr_to_write; |
1483 | wrote += write_chunk - wbc.nr_to_write; | 1483 | wrote += write_chunk - wbc.nr_to_write; |
1484 | |||
1485 | if (need_resched()) { | ||
1486 | /* | ||
1487 | * We're trying to balance between building up a nice | ||
1488 | * long list of IOs to improve our merge rate, and | ||
1489 | * getting those IOs out quickly for anyone throttling | ||
1490 | * in balance_dirty_pages(). cond_resched() doesn't | ||
1491 | * unplug, so get our IOs out the door before we | ||
1492 | * give up the CPU. | ||
1493 | */ | ||
1494 | blk_flush_plug(current); | ||
1495 | cond_resched(); | ||
1496 | } | ||
1497 | |||
1498 | |||
1484 | spin_lock(&wb->list_lock); | 1499 | spin_lock(&wb->list_lock); |
1485 | spin_lock(&inode->i_lock); | 1500 | spin_lock(&inode->i_lock); |
1486 | if (!(inode->i_state & I_DIRTY_ALL)) | 1501 | if (!(inode->i_state & I_DIRTY_ALL)) |
@@ -1488,7 +1503,7 @@ static long writeback_sb_inodes(struct super_block *sb, | |||
1488 | requeue_inode(inode, wb, &wbc); | 1503 | requeue_inode(inode, wb, &wbc); |
1489 | inode_sync_complete(inode); | 1504 | inode_sync_complete(inode); |
1490 | spin_unlock(&inode->i_lock); | 1505 | spin_unlock(&inode->i_lock); |
1491 | cond_resched_lock(&wb->list_lock); | 1506 | |
1492 | /* | 1507 | /* |
1493 | * bail out to wb_writeback() often enough to check | 1508 | * bail out to wb_writeback() often enough to check |
1494 | * background threshold and other termination conditions. | 1509 | * background threshold and other termination conditions. |
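
The writeback_sb_inodes() hunk above trades the old cond_resched_lock() on wb->list_lock for an explicit yield point: before giving up the CPU, the plugged bios are flushed so anyone throttled in balance_dirty_pages() sees the I/O immediately instead of waiting for an implicit unplug. The shape of that yield, as an illustrative fragment rather than the kernel function:

    /* sketch: flush plugged I/O before voluntarily rescheduling */
    static inline void writeback_yield(void)
    {
    	if (need_resched()) {
    		blk_flush_plug(current);	/* push queued bios to the device */
    		cond_resched();			/* then give up the CPU */
    	}
    }
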
diff --git a/fs/mpage.c b/fs/mpage.c index 778a4ddef77a..a7c34274f207 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) | |||
139 | static struct bio * | 139 | static struct bio * |
140 | do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | 140 | do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, |
141 | sector_t *last_block_in_bio, struct buffer_head *map_bh, | 141 | sector_t *last_block_in_bio, struct buffer_head *map_bh, |
142 | unsigned long *first_logical_block, get_block_t get_block) | 142 | unsigned long *first_logical_block, get_block_t get_block, |
143 | gfp_t gfp) | ||
143 | { | 144 | { |
144 | struct inode *inode = page->mapping->host; | 145 | struct inode *inode = page->mapping->host; |
145 | const unsigned blkbits = inode->i_blkbits; | 146 | const unsigned blkbits = inode->i_blkbits; |
@@ -277,8 +278,7 @@ alloc_new: | |||
277 | goto out; | 278 | goto out; |
278 | } | 279 | } |
279 | bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), | 280 | bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), |
280 | min_t(int, nr_pages, BIO_MAX_PAGES), | 281 | min_t(int, nr_pages, BIO_MAX_PAGES), gfp); |
281 | GFP_KERNEL); | ||
282 | if (bio == NULL) | 282 | if (bio == NULL) |
283 | goto confused; | 283 | goto confused; |
284 | } | 284 | } |
@@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, | |||
361 | sector_t last_block_in_bio = 0; | 361 | sector_t last_block_in_bio = 0; |
362 | struct buffer_head map_bh; | 362 | struct buffer_head map_bh; |
363 | unsigned long first_logical_block = 0; | 363 | unsigned long first_logical_block = 0; |
364 | gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping); | ||
364 | 365 | ||
365 | map_bh.b_state = 0; | 366 | map_bh.b_state = 0; |
366 | map_bh.b_size = 0; | 367 | map_bh.b_size = 0; |
@@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, | |||
370 | prefetchw(&page->flags); | 371 | prefetchw(&page->flags); |
371 | list_del(&page->lru); | 372 | list_del(&page->lru); |
372 | if (!add_to_page_cache_lru(page, mapping, | 373 | if (!add_to_page_cache_lru(page, mapping, |
373 | page->index, GFP_KERNEL)) { | 374 | page->index, |
375 | gfp)) { | ||
374 | bio = do_mpage_readpage(bio, page, | 376 | bio = do_mpage_readpage(bio, page, |
375 | nr_pages - page_idx, | 377 | nr_pages - page_idx, |
376 | &last_block_in_bio, &map_bh, | 378 | &last_block_in_bio, &map_bh, |
377 | &first_logical_block, | 379 | &first_logical_block, |
378 | get_block); | 380 | get_block, gfp); |
379 | } | 381 | } |
380 | page_cache_release(page); | 382 | page_cache_release(page); |
381 | } | 383 | } |
@@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block) | |||
395 | sector_t last_block_in_bio = 0; | 397 | sector_t last_block_in_bio = 0; |
396 | struct buffer_head map_bh; | 398 | struct buffer_head map_bh; |
397 | unsigned long first_logical_block = 0; | 399 | unsigned long first_logical_block = 0; |
400 | gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping); | ||
398 | 401 | ||
399 | map_bh.b_state = 0; | 402 | map_bh.b_state = 0; |
400 | map_bh.b_size = 0; | 403 | map_bh.b_size = 0; |
401 | bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, | 404 | bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, |
402 | &map_bh, &first_logical_block, get_block); | 405 | &map_bh, &first_logical_block, get_block, gfp); |
403 | if (bio) | 406 | if (bio) |
404 | mpage_bio_submit(READ, bio); | 407 | mpage_bio_submit(READ, bio); |
405 | return 0; | 408 | return 0; |
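
Both the ext4_mpage_readpages() hunk earlier and the fs/mpage.c changes above stop using a bare GFP_KERNEL for readahead pages and bios; every allocation is masked with mapping_gfp_mask() so a filesystem that clears __GFP_FS on its mapping cannot recurse into itself under memory pressure. The pattern in isolation, with a hypothetical helper name:

    /* sketch: allocate a readahead page with only the modes the mapping allows */
    static struct page *readahead_alloc_page(struct address_space *mapping,
    					 pgoff_t index)
    {
    	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
    	struct page *page = __page_cache_alloc(gfp);
    
    	if (page && add_to_page_cache_lru(page, mapping, index, gfp)) {
    		page_cache_release(page);	/* insertion failed, drop it */
    		page = NULL;
    	}
    	return page;
    }
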
diff --git a/fs/namei.c b/fs/namei.c index 726d211db484..33e9495a3129 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1558,8 +1558,6 @@ static int lookup_fast(struct nameidata *nd, | |||
1558 | negative = d_is_negative(dentry); | 1558 | negative = d_is_negative(dentry); |
1559 | if (read_seqcount_retry(&dentry->d_seq, seq)) | 1559 | if (read_seqcount_retry(&dentry->d_seq, seq)) |
1560 | return -ECHILD; | 1560 | return -ECHILD; |
1561 | if (negative) | ||
1562 | return -ENOENT; | ||
1563 | 1561 | ||
1564 | /* | 1562 | /* |
1565 | * This sequence count validates that the parent had no | 1563 | * This sequence count validates that the parent had no |
@@ -1580,6 +1578,12 @@ static int lookup_fast(struct nameidata *nd, | |||
1580 | goto unlazy; | 1578 | goto unlazy; |
1581 | } | 1579 | } |
1582 | } | 1580 | } |
1581 | /* | ||
1582 | * Note: do negative dentry check after revalidation in | ||
1583 | * case that drops it. | ||
1584 | */ | ||
1585 | if (negative) | ||
1586 | return -ENOENT; | ||
1583 | path->mnt = mnt; | 1587 | path->mnt = mnt; |
1584 | path->dentry = dentry; | 1588 | path->dentry = dentry; |
1585 | if (likely(__follow_mount_rcu(nd, path, inode, seqp))) | 1589 | if (likely(__follow_mount_rcu(nd, path, inode, seqp))) |
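
The lookup_fast() hunks above move the negative-dentry test so it runs only after ->d_revalidate() has approved the cached entry; a revalidation that is about to drop the dentry can no longer be short-circuited into a stale -ENOENT. A rough sketch of the resulting ordering (hypothetical wrapper, not the RCU-walk fast path itself):

    /* sketch: trust a negative entry only once revalidation has passed */
    static int cached_lookup_status(struct dentry *dentry, unsigned int flags)
    {
    	if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
    		int status = dentry->d_op->d_revalidate(dentry, flags);
    
    		if (status <= 0)
    			return status;	/* invalid or error: no cached answer */
    	}
    	if (d_is_negative(dentry))
    		return -ENOENT;		/* the negative entry is trustworthy */
    	return 1;			/* positive and valid */
    }
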
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 2714ef835bdd..be806ead7f4d 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c | |||
@@ -113,7 +113,8 @@ out: | |||
113 | return status; | 113 | return status; |
114 | } | 114 | } |
115 | 115 | ||
116 | static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid) | 116 | static int nfs_delegation_claim_opens(struct inode *inode, |
117 | const nfs4_stateid *stateid, fmode_t type) | ||
117 | { | 118 | { |
118 | struct nfs_inode *nfsi = NFS_I(inode); | 119 | struct nfs_inode *nfsi = NFS_I(inode); |
119 | struct nfs_open_context *ctx; | 120 | struct nfs_open_context *ctx; |
@@ -140,7 +141,7 @@ again: | |||
140 | /* Block nfs4_proc_unlck */ | 141 | /* Block nfs4_proc_unlck */ |
141 | mutex_lock(&sp->so_delegreturn_mutex); | 142 | mutex_lock(&sp->so_delegreturn_mutex); |
142 | seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); | 143 | seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); |
143 | err = nfs4_open_delegation_recall(ctx, state, stateid); | 144 | err = nfs4_open_delegation_recall(ctx, state, stateid, type); |
144 | if (!err) | 145 | if (!err) |
145 | err = nfs_delegation_claim_locks(ctx, state, stateid); | 146 | err = nfs_delegation_claim_locks(ctx, state, stateid); |
146 | if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) | 147 | if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) |
@@ -411,7 +412,8 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation | |||
411 | do { | 412 | do { |
412 | if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) | 413 | if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) |
413 | break; | 414 | break; |
414 | err = nfs_delegation_claim_opens(inode, &delegation->stateid); | 415 | err = nfs_delegation_claim_opens(inode, &delegation->stateid, |
416 | delegation->type); | ||
415 | if (!issync || err != -EAGAIN) | 417 | if (!issync || err != -EAGAIN) |
416 | break; | 418 | break; |
417 | /* | 419 | /* |
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index a44829173e57..333063e032f0 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h | |||
@@ -54,7 +54,7 @@ void nfs_delegation_reap_unclaimed(struct nfs_client *clp); | |||
54 | 54 | ||
55 | /* NFSv4 delegation-related procedures */ | 55 | /* NFSv4 delegation-related procedures */ |
56 | int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync); | 56 | int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync); |
57 | int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid); | 57 | int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type); |
58 | int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid); | 58 | int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid); |
59 | bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags); | 59 | bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags); |
60 | 60 | ||
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 38678d9a5cc4..4b1d08f56aba 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -166,8 +166,11 @@ nfs_direct_select_verf(struct nfs_direct_req *dreq, | |||
166 | struct nfs_writeverf *verfp = &dreq->verf; | 166 | struct nfs_writeverf *verfp = &dreq->verf; |
167 | 167 | ||
168 | #ifdef CONFIG_NFS_V4_1 | 168 | #ifdef CONFIG_NFS_V4_1 |
169 | if (ds_clp) { | 169 | /* |
170 | /* pNFS is in use, use the DS verf */ | 170 | * pNFS is in use, use the DS verf except when commit_through_mds is set |
171 | * for a layout segment where nbuckets is zero. | ||
172 | */ | ||
173 | if (ds_clp && dreq->ds_cinfo.nbuckets > 0) { | ||
171 | if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets) | 174 | if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets) |
172 | verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf; | 175 | verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf; |
173 | else | 176 | else |
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index b34f2e228601..02ec07973bc4 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c | |||
@@ -629,23 +629,18 @@ out_put: | |||
629 | goto out; | 629 | goto out; |
630 | } | 630 | } |
631 | 631 | ||
632 | static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl) | 632 | static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl) |
633 | { | 633 | { |
634 | int i; | 634 | int i; |
635 | 635 | ||
636 | for (i = 0; i < fl->num_fh; i++) { | 636 | if (fl->fh_array) { |
637 | if (!fl->fh_array[i]) | 637 | for (i = 0; i < fl->num_fh; i++) { |
638 | break; | 638 | if (!fl->fh_array[i]) |
639 | kfree(fl->fh_array[i]); | 639 | break; |
640 | kfree(fl->fh_array[i]); | ||
641 | } | ||
642 | kfree(fl->fh_array); | ||
640 | } | 643 | } |
641 | kfree(fl->fh_array); | ||
642 | fl->fh_array = NULL; | ||
643 | } | ||
644 | |||
645 | static void | ||
646 | _filelayout_free_lseg(struct nfs4_filelayout_segment *fl) | ||
647 | { | ||
648 | filelayout_free_fh_array(fl); | ||
649 | kfree(fl); | 644 | kfree(fl); |
650 | } | 645 | } |
651 | 646 | ||
@@ -716,21 +711,21 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, | |||
716 | /* Do we want to use a mempool here? */ | 711 | /* Do we want to use a mempool here? */ |
717 | fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); | 712 | fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); |
718 | if (!fl->fh_array[i]) | 713 | if (!fl->fh_array[i]) |
719 | goto out_err_free; | 714 | goto out_err; |
720 | 715 | ||
721 | p = xdr_inline_decode(&stream, 4); | 716 | p = xdr_inline_decode(&stream, 4); |
722 | if (unlikely(!p)) | 717 | if (unlikely(!p)) |
723 | goto out_err_free; | 718 | goto out_err; |
724 | fl->fh_array[i]->size = be32_to_cpup(p++); | 719 | fl->fh_array[i]->size = be32_to_cpup(p++); |
725 | if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) { | 720 | if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) { |
726 | printk(KERN_ERR "NFS: Too big fh %d received %d\n", | 721 | printk(KERN_ERR "NFS: Too big fh %d received %d\n", |
727 | i, fl->fh_array[i]->size); | 722 | i, fl->fh_array[i]->size); |
728 | goto out_err_free; | 723 | goto out_err; |
729 | } | 724 | } |
730 | 725 | ||
731 | p = xdr_inline_decode(&stream, fl->fh_array[i]->size); | 726 | p = xdr_inline_decode(&stream, fl->fh_array[i]->size); |
732 | if (unlikely(!p)) | 727 | if (unlikely(!p)) |
733 | goto out_err_free; | 728 | goto out_err; |
734 | memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size); | 729 | memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size); |
735 | dprintk("DEBUG: %s: fh len %d\n", __func__, | 730 | dprintk("DEBUG: %s: fh len %d\n", __func__, |
736 | fl->fh_array[i]->size); | 731 | fl->fh_array[i]->size); |
@@ -739,8 +734,6 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, | |||
739 | __free_page(scratch); | 734 | __free_page(scratch); |
740 | return 0; | 735 | return 0; |
741 | 736 | ||
742 | out_err_free: | ||
743 | filelayout_free_fh_array(fl); | ||
744 | out_err: | 737 | out_err: |
745 | __free_page(scratch); | 738 | __free_page(scratch); |
746 | return -EIO; | 739 | return -EIO; |
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index d731bbf974aa..0f020e4d8421 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c | |||
@@ -175,10 +175,12 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) | |||
175 | { | 175 | { |
176 | struct nfs_server *server = NFS_SERVER(file_inode(filep)); | 176 | struct nfs_server *server = NFS_SERVER(file_inode(filep)); |
177 | struct nfs4_exception exception = { }; | 177 | struct nfs4_exception exception = { }; |
178 | int err; | 178 | loff_t err; |
179 | 179 | ||
180 | do { | 180 | do { |
181 | err = _nfs42_proc_llseek(filep, offset, whence); | 181 | err = _nfs42_proc_llseek(filep, offset, whence); |
182 | if (err >= 0) | ||
183 | break; | ||
182 | if (err == -ENOTSUPP) | 184 | if (err == -ENOTSUPP) |
183 | return -EOPNOTSUPP; | 185 | return -EOPNOTSUPP; |
184 | err = nfs4_handle_exception(server, err, &exception); | 186 | err = nfs4_handle_exception(server, err, &exception); |
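
The nfs42_proc_llseek() fix above does two things: the result variable becomes loff_t so a large offset is not truncated through int, and a successful (>= 0) result breaks out of the loop instead of being handed to the exception handler as though it were an error. The corrected loop shape, sketched with a made-up one-shot helper _seek_once():

    /* sketch of the retry loop; _seek_once() is a stand-in name */
    static loff_t nfs42_seek(struct file *filep, loff_t offset, int whence)
    {
    	struct nfs_server *server = NFS_SERVER(file_inode(filep));
    	struct nfs4_exception exception = { };
    	loff_t err;
    
    	do {
    		err = _seek_once(filep, offset, whence);
    		if (err >= 0)
    			break;			/* got an offset: stop retrying */
    		if (err == -ENOTSUPP)
    			return -EOPNOTSUPP;
    		err = nfs4_handle_exception(server, err, &exception);
    	} while (exception.retry);
    
    	return err;
    }
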
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 693b903b48bd..5133bb18830e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1127,6 +1127,21 @@ static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) | |||
1127 | return ret; | 1127 | return ret; |
1128 | } | 1128 | } |
1129 | 1129 | ||
1130 | static bool nfs4_mode_match_open_stateid(struct nfs4_state *state, | ||
1131 | fmode_t fmode) | ||
1132 | { | ||
1133 | switch(fmode & (FMODE_READ|FMODE_WRITE)) { | ||
1134 | case FMODE_READ|FMODE_WRITE: | ||
1135 | return state->n_rdwr != 0; | ||
1136 | case FMODE_WRITE: | ||
1137 | return state->n_wronly != 0; | ||
1138 | case FMODE_READ: | ||
1139 | return state->n_rdonly != 0; | ||
1140 | } | ||
1141 | WARN_ON_ONCE(1); | ||
1142 | return false; | ||
1143 | } | ||
1144 | |||
1130 | static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) | 1145 | static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) |
1131 | { | 1146 | { |
1132 | int ret = 0; | 1147 | int ret = 0; |
@@ -1443,12 +1458,18 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state) | |||
1443 | if (delegation) | 1458 | if (delegation) |
1444 | delegation_flags = delegation->flags; | 1459 | delegation_flags = delegation->flags; |
1445 | rcu_read_unlock(); | 1460 | rcu_read_unlock(); |
1446 | if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { | 1461 | switch (data->o_arg.claim) { |
1462 | default: | ||
1463 | break; | ||
1464 | case NFS4_OPEN_CLAIM_DELEGATE_CUR: | ||
1465 | case NFS4_OPEN_CLAIM_DELEG_CUR_FH: | ||
1447 | pr_err_ratelimited("NFS: Broken NFSv4 server %s is " | 1466 | pr_err_ratelimited("NFS: Broken NFSv4 server %s is " |
1448 | "returning a delegation for " | 1467 | "returning a delegation for " |
1449 | "OPEN(CLAIM_DELEGATE_CUR)\n", | 1468 | "OPEN(CLAIM_DELEGATE_CUR)\n", |
1450 | clp->cl_hostname); | 1469 | clp->cl_hostname); |
1451 | } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) | 1470 | return; |
1471 | } | ||
1472 | if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) | ||
1452 | nfs_inode_set_delegation(state->inode, | 1473 | nfs_inode_set_delegation(state->inode, |
1453 | data->owner->so_cred, | 1474 | data->owner->so_cred, |
1454 | &data->o_res); | 1475 | &data->o_res); |
@@ -1571,17 +1592,13 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context | |||
1571 | return opendata; | 1592 | return opendata; |
1572 | } | 1593 | } |
1573 | 1594 | ||
1574 | static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) | 1595 | static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, |
1596 | fmode_t fmode) | ||
1575 | { | 1597 | { |
1576 | struct nfs4_state *newstate; | 1598 | struct nfs4_state *newstate; |
1577 | int ret; | 1599 | int ret; |
1578 | 1600 | ||
1579 | if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR || | 1601 | if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) |
1580 | opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) && | ||
1581 | (opendata->o_arg.u.delegation_type & fmode) != fmode) | ||
1582 | /* This mode can't have been delegated, so we must have | ||
1583 | * a valid open_stateid to cover it - not need to reclaim. | ||
1584 | */ | ||
1585 | return 0; | 1602 | return 0; |
1586 | opendata->o_arg.open_flags = 0; | 1603 | opendata->o_arg.open_flags = 0; |
1587 | opendata->o_arg.fmode = fmode; | 1604 | opendata->o_arg.fmode = fmode; |
@@ -1597,14 +1614,14 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod | |||
1597 | newstate = nfs4_opendata_to_nfs4_state(opendata); | 1614 | newstate = nfs4_opendata_to_nfs4_state(opendata); |
1598 | if (IS_ERR(newstate)) | 1615 | if (IS_ERR(newstate)) |
1599 | return PTR_ERR(newstate); | 1616 | return PTR_ERR(newstate); |
1617 | if (newstate != opendata->state) | ||
1618 | ret = -ESTALE; | ||
1600 | nfs4_close_state(newstate, fmode); | 1619 | nfs4_close_state(newstate, fmode); |
1601 | *res = newstate; | 1620 | return ret; |
1602 | return 0; | ||
1603 | } | 1621 | } |
1604 | 1622 | ||
1605 | static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) | 1623 | static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) |
1606 | { | 1624 | { |
1607 | struct nfs4_state *newstate; | ||
1608 | int ret; | 1625 | int ret; |
1609 | 1626 | ||
1610 | /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */ | 1627 | /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */ |
@@ -1615,27 +1632,15 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state * | |||
1615 | clear_bit(NFS_DELEGATED_STATE, &state->flags); | 1632 | clear_bit(NFS_DELEGATED_STATE, &state->flags); |
1616 | clear_bit(NFS_OPEN_STATE, &state->flags); | 1633 | clear_bit(NFS_OPEN_STATE, &state->flags); |
1617 | smp_rmb(); | 1634 | smp_rmb(); |
1618 | if (state->n_rdwr != 0) { | 1635 | ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); |
1619 | ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); | 1636 | if (ret != 0) |
1620 | if (ret != 0) | 1637 | return ret; |
1621 | return ret; | 1638 | ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); |
1622 | if (newstate != state) | 1639 | if (ret != 0) |
1623 | return -ESTALE; | 1640 | return ret; |
1624 | } | 1641 | ret = nfs4_open_recover_helper(opendata, FMODE_READ); |
1625 | if (state->n_wronly != 0) { | 1642 | if (ret != 0) |
1626 | ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); | 1643 | return ret; |
1627 | if (ret != 0) | ||
1628 | return ret; | ||
1629 | if (newstate != state) | ||
1630 | return -ESTALE; | ||
1631 | } | ||
1632 | if (state->n_rdonly != 0) { | ||
1633 | ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); | ||
1634 | if (ret != 0) | ||
1635 | return ret; | ||
1636 | if (newstate != state) | ||
1637 | return -ESTALE; | ||
1638 | } | ||
1639 | /* | 1644 | /* |
1640 | * We may have performed cached opens for all three recoveries. | 1645 | * We may have performed cached opens for all three recoveries. |
1641 | * Check if we need to update the current stateid. | 1646 | * Check if we need to update the current stateid. |
@@ -1759,18 +1764,35 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct | |||
1759 | return err; | 1764 | return err; |
1760 | } | 1765 | } |
1761 | 1766 | ||
1762 | int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) | 1767 | int nfs4_open_delegation_recall(struct nfs_open_context *ctx, |
1768 | struct nfs4_state *state, const nfs4_stateid *stateid, | ||
1769 | fmode_t type) | ||
1763 | { | 1770 | { |
1764 | struct nfs_server *server = NFS_SERVER(state->inode); | 1771 | struct nfs_server *server = NFS_SERVER(state->inode); |
1765 | struct nfs4_opendata *opendata; | 1772 | struct nfs4_opendata *opendata; |
1766 | int err; | 1773 | int err = 0; |
1767 | 1774 | ||
1768 | opendata = nfs4_open_recoverdata_alloc(ctx, state, | 1775 | opendata = nfs4_open_recoverdata_alloc(ctx, state, |
1769 | NFS4_OPEN_CLAIM_DELEG_CUR_FH); | 1776 | NFS4_OPEN_CLAIM_DELEG_CUR_FH); |
1770 | if (IS_ERR(opendata)) | 1777 | if (IS_ERR(opendata)) |
1771 | return PTR_ERR(opendata); | 1778 | return PTR_ERR(opendata); |
1772 | nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); | 1779 | nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); |
1773 | err = nfs4_open_recover(opendata, state); | 1780 | write_seqlock(&state->seqlock); |
1781 | nfs4_stateid_copy(&state->stateid, &state->open_stateid); | ||
1782 | write_sequnlock(&state->seqlock); | ||
1783 | clear_bit(NFS_DELEGATED_STATE, &state->flags); | ||
1784 | switch (type & (FMODE_READ|FMODE_WRITE)) { | ||
1785 | case FMODE_READ|FMODE_WRITE: | ||
1786 | case FMODE_WRITE: | ||
1787 | err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); | ||
1788 | if (err) | ||
1789 | break; | ||
1790 | err = nfs4_open_recover_helper(opendata, FMODE_WRITE); | ||
1791 | if (err) | ||
1792 | break; | ||
1793 | case FMODE_READ: | ||
1794 | err = nfs4_open_recover_helper(opendata, FMODE_READ); | ||
1795 | } | ||
1774 | nfs4_opendata_put(opendata); | 1796 | nfs4_opendata_put(opendata); |
1775 | return nfs4_handle_delegation_recall_error(server, state, stateid, err); | 1797 | return nfs4_handle_delegation_recall_error(server, state, stateid, err); |
1776 | } | 1798 | } |
@@ -1850,6 +1872,8 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) | |||
1850 | data->rpc_done = 0; | 1872 | data->rpc_done = 0; |
1851 | data->rpc_status = 0; | 1873 | data->rpc_status = 0; |
1852 | data->timestamp = jiffies; | 1874 | data->timestamp = jiffies; |
1875 | if (data->is_recover) | ||
1876 | nfs4_set_sequence_privileged(&data->c_arg.seq_args); | ||
1853 | task = rpc_run_task(&task_setup_data); | 1877 | task = rpc_run_task(&task_setup_data); |
1854 | if (IS_ERR(task)) | 1878 | if (IS_ERR(task)) |
1855 | return PTR_ERR(task); | 1879 | return PTR_ERR(task); |
@@ -2645,6 +2669,15 @@ out: | |||
2645 | return err; | 2669 | return err; |
2646 | } | 2670 | } |
2647 | 2671 | ||
2672 | static bool | ||
2673 | nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) | ||
2674 | { | ||
2675 | if (inode == NULL || !nfs_have_layout(inode)) | ||
2676 | return false; | ||
2677 | |||
2678 | return pnfs_wait_on_layoutreturn(inode, task); | ||
2679 | } | ||
2680 | |||
2648 | struct nfs4_closedata { | 2681 | struct nfs4_closedata { |
2649 | struct inode *inode; | 2682 | struct inode *inode; |
2650 | struct nfs4_state *state; | 2683 | struct nfs4_state *state; |
@@ -2763,6 +2796,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) | |||
2763 | goto out_no_action; | 2796 | goto out_no_action; |
2764 | } | 2797 | } |
2765 | 2798 | ||
2799 | if (nfs4_wait_on_layoutreturn(inode, task)) { | ||
2800 | nfs_release_seqid(calldata->arg.seqid); | ||
2801 | goto out_wait; | ||
2802 | } | ||
2803 | |||
2766 | if (calldata->arg.fmode == 0) | 2804 | if (calldata->arg.fmode == 0) |
2767 | task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; | 2805 | task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; |
2768 | if (calldata->roc) | 2806 | if (calldata->roc) |
@@ -5308,6 +5346,9 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) | |||
5308 | 5346 | ||
5309 | d_data = (struct nfs4_delegreturndata *)data; | 5347 | d_data = (struct nfs4_delegreturndata *)data; |
5310 | 5348 | ||
5349 | if (nfs4_wait_on_layoutreturn(d_data->inode, task)) | ||
5350 | return; | ||
5351 | |||
5311 | if (d_data->roc) | 5352 | if (d_data->roc) |
5312 | pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); | 5353 | pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); |
5313 | 5354 | ||
@@ -7800,39 +7841,46 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) | |||
7800 | dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n", | 7841 | dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n", |
7801 | __func__, delay); | 7842 | __func__, delay); |
7802 | rpc_delay(task, delay); | 7843 | rpc_delay(task, delay); |
7803 | task->tk_status = 0; | 7844 | /* Do not call nfs4_async_handle_error() */ |
7804 | rpc_restart_call_prepare(task); | 7845 | goto out_restart; |
7805 | goto out; /* Do not call nfs4_async_handle_error() */ | ||
7806 | } | 7846 | } |
7807 | break; | 7847 | break; |
7808 | case -NFS4ERR_EXPIRED: | 7848 | case -NFS4ERR_EXPIRED: |
7809 | case -NFS4ERR_BAD_STATEID: | 7849 | case -NFS4ERR_BAD_STATEID: |
7810 | spin_lock(&inode->i_lock); | 7850 | spin_lock(&inode->i_lock); |
7811 | lo = NFS_I(inode)->layout; | 7851 | if (nfs4_stateid_match(&lgp->args.stateid, |
7812 | if (!lo || list_empty(&lo->plh_segs)) { | 7852 | &lgp->args.ctx->state->stateid)) { |
7813 | spin_unlock(&inode->i_lock); | 7853 | spin_unlock(&inode->i_lock); |
7814 | /* If the open stateid was bad, then recover it. */ | 7854 | /* If the open stateid was bad, then recover it. */ |
7815 | state = lgp->args.ctx->state; | 7855 | state = lgp->args.ctx->state; |
7816 | } else { | 7856 | break; |
7857 | } | ||
7858 | lo = NFS_I(inode)->layout; | ||
7859 | if (lo && nfs4_stateid_match(&lgp->args.stateid, | ||
7860 | &lo->plh_stateid)) { | ||
7817 | LIST_HEAD(head); | 7861 | LIST_HEAD(head); |
7818 | 7862 | ||
7819 | /* | 7863 | /* |
7820 | * Mark the bad layout state as invalid, then retry | 7864 | * Mark the bad layout state as invalid, then retry |
7821 | * with the current stateid. | 7865 | * with the current stateid. |
7822 | */ | 7866 | */ |
7867 | set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); | ||
7823 | pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); | 7868 | pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); |
7824 | spin_unlock(&inode->i_lock); | 7869 | spin_unlock(&inode->i_lock); |
7825 | pnfs_free_lseg_list(&head); | 7870 | pnfs_free_lseg_list(&head); |
7826 | 7871 | } else | |
7827 | task->tk_status = 0; | 7872 | spin_unlock(&inode->i_lock); |
7828 | rpc_restart_call_prepare(task); | 7873 | goto out_restart; |
7829 | } | ||
7830 | } | 7874 | } |
7831 | if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) | 7875 | if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) |
7832 | rpc_restart_call_prepare(task); | 7876 | goto out_restart; |
7833 | out: | 7877 | out: |
7834 | dprintk("<-- %s\n", __func__); | 7878 | dprintk("<-- %s\n", __func__); |
7835 | return; | 7879 | return; |
7880 | out_restart: | ||
7881 | task->tk_status = 0; | ||
7882 | rpc_restart_call_prepare(task); | ||
7883 | return; | ||
7836 | out_overflow: | 7884 | out_overflow: |
7837 | task->tk_status = -EOVERFLOW; | 7885 | task->tk_status = -EOVERFLOW; |
7838 | goto out; | 7886 | goto out; |
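
Two related patterns in the nfs4proc.c section above are easy to miss. Open recovery now asks nfs4_mode_match_open_stateid() whether any opener actually holds a mode before re-opening it, and the delegation-recall switch relies on deliberate fall-through so a read/write delegation also reclaims write-only and read-only opens. A condensed sketch of that fall-through, with reclaim_mode() as a made-up stand-in for nfs4_open_recover_helper():

    /* sketch of the recall fall-through; reclaim_mode() is a made-up name */
    static int recall_opens(struct nfs4_opendata *opendata, fmode_t type)
    {
    	int err = 0;
    
    	switch (type & (FMODE_READ | FMODE_WRITE)) {
    	case FMODE_READ | FMODE_WRITE:
    	case FMODE_WRITE:
    		err = reclaim_mode(opendata, FMODE_READ | FMODE_WRITE);
    		if (err)
    			break;
    		err = reclaim_mode(opendata, FMODE_WRITE);
    		if (err)
    			break;
    		/* fall through: read-only opens still need reclaiming */
    	case FMODE_READ:
    		err = reclaim_mode(opendata, FMODE_READ);
    	}
    	return err;
    }
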
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index da73bc443238..d854693a15b0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1481,7 +1481,7 @@ restart: | |||
1481 | spin_unlock(&state->state_lock); | 1481 | spin_unlock(&state->state_lock); |
1482 | } | 1482 | } |
1483 | nfs4_put_open_state(state); | 1483 | nfs4_put_open_state(state); |
1484 | clear_bit(NFS4CLNT_RECLAIM_NOGRACE, | 1484 | clear_bit(NFS_STATE_RECLAIM_NOGRACE, |
1485 | &state->flags); | 1485 | &state->flags); |
1486 | spin_lock(&sp->so_lock); | 1486 | spin_lock(&sp->so_lock); |
1487 | goto restart; | 1487 | goto restart; |
@@ -1725,7 +1725,8 @@ restart: | |||
1725 | if (!test_and_clear_bit(ops->owner_flag_bit, | 1725 | if (!test_and_clear_bit(ops->owner_flag_bit, |
1726 | &sp->so_flags)) | 1726 | &sp->so_flags)) |
1727 | continue; | 1727 | continue; |
1728 | atomic_inc(&sp->so_count); | 1728 | if (!atomic_inc_not_zero(&sp->so_count)) |
1729 | continue; | ||
1729 | spin_unlock(&clp->cl_lock); | 1730 | spin_unlock(&clp->cl_lock); |
1730 | rcu_read_unlock(); | 1731 | rcu_read_unlock(); |
1731 | 1732 | ||
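
The one-line nfs4state.c change above swaps an unconditional atomic_inc() of the state-owner refcount for atomic_inc_not_zero(): an owner whose count has already hit zero is being torn down, so the reclaim walk must skip it rather than resurrect a dying object. The generic idiom, in an illustrative sketch:

    /* sketch of the "take a reference only while still alive" idiom */
    struct owner {
    	atomic_t refcount;
    };
    
    static struct owner *owner_get_live(struct owner *o)
    {
    	if (!atomic_inc_not_zero(&o->refcount))
    		return NULL;	/* already on its way to being freed */
    	return o;
    }
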
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 28df12e525ba..671cf68fe56b 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h | |||
@@ -409,7 +409,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event, | |||
409 | __entry->flags = flags; | 409 | __entry->flags = flags; |
410 | __entry->fmode = (__force unsigned int)ctx->mode; | 410 | __entry->fmode = (__force unsigned int)ctx->mode; |
411 | __entry->dev = ctx->dentry->d_sb->s_dev; | 411 | __entry->dev = ctx->dentry->d_sb->s_dev; |
412 | if (!IS_ERR(state)) | 412 | if (!IS_ERR_OR_NULL(state)) |
413 | inode = state->inode; | 413 | inode = state->inode; |
414 | if (inode != NULL) { | 414 | if (inode != NULL) { |
415 | __entry->fileid = NFS_FILEID(inode); | 415 | __entry->fileid = NFS_FILEID(inode); |
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 7c5718ba625e..fe3ddd20ff89 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c | |||
@@ -508,7 +508,7 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, | |||
508 | * for it without upsetting the slab allocator. | 508 | * for it without upsetting the slab allocator. |
509 | */ | 509 | */ |
510 | if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) * | 510 | if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) * |
511 | sizeof(struct page) > PAGE_SIZE) | 511 | sizeof(struct page *) > PAGE_SIZE) |
512 | return 0; | 512 | return 0; |
513 | 513 | ||
514 | return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes); | 514 | return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes); |
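
The nfs_generic_pg_test() change above fixes a classic sizeof slip: the limit being enforced is the size of an array of page pointers, so the multiplier must be sizeof(struct page *); using sizeof(struct page) overstates the array by more than an order of magnitude and made the coalescing test trip far too early. The check in isolation (illustrative helper, not the pageio code):

    /* sketch: allow coalescing only while the page-pointer array fits a page */
    static size_t coalesce_limit(size_t queued_bytes, size_t new_bytes)
    {
    	size_t npages = (queued_bytes + new_bytes) >> PAGE_SHIFT;
    
    	if (npages * sizeof(struct page *) > PAGE_SIZE)
    		return 0;	/* would need an oversized pointer array */
    	return new_bytes;
    }
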
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index ba1246433794..8abe27165ad0 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -1104,20 +1104,15 @@ bool pnfs_roc(struct inode *ino) | |||
1104 | mark_lseg_invalid(lseg, &tmp_list); | 1104 | mark_lseg_invalid(lseg, &tmp_list); |
1105 | found = true; | 1105 | found = true; |
1106 | } | 1106 | } |
1107 | /* pnfs_prepare_layoutreturn() grabs lo ref and it will be put | 1107 | /* ROC in two conditions: |
1108 | * in pnfs_roc_release(). We don't really send a layoutreturn but | ||
1109 | * still want others to view us like we are sending one! | ||
1110 | * | ||
1111 | * If pnfs_prepare_layoutreturn() fails, it means someone else is doing | ||
1112 | * LAYOUTRETURN, so we proceed like there are no layouts to return. | ||
1113 | * | ||
1114 | * ROC in three conditions: | ||
1115 | * 1. there are ROC lsegs | 1108 | * 1. there are ROC lsegs |
1116 | * 2. we don't send layoutreturn | 1109 | * 2. we don't send layoutreturn |
1117 | * 3. no others are sending layoutreturn | ||
1118 | */ | 1110 | */ |
1119 | if (found && !layoutreturn && pnfs_prepare_layoutreturn(lo)) | 1111 | if (found && !layoutreturn) { |
1112 | /* lo ref dropped in pnfs_roc_release() */ | ||
1113 | pnfs_get_layout_hdr(lo); | ||
1120 | roc = true; | 1114 | roc = true; |
1115 | } | ||
1121 | 1116 | ||
1122 | out_noroc: | 1117 | out_noroc: |
1123 | spin_unlock(&ino->i_lock); | 1118 | spin_unlock(&ino->i_lock); |
@@ -1172,6 +1167,26 @@ void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier) | |||
1172 | spin_unlock(&ino->i_lock); | 1167 | spin_unlock(&ino->i_lock); |
1173 | } | 1168 | } |
1174 | 1169 | ||
1170 | bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task) | ||
1171 | { | ||
1172 | struct nfs_inode *nfsi = NFS_I(ino); | ||
1173 | struct pnfs_layout_hdr *lo; | ||
1174 | bool sleep = false; | ||
1175 | |||
1176 | /* We might not have grabbed the lo reference, so we need to check | ||
1177 | * under i_lock. */ | ||
1178 | spin_lock(&ino->i_lock); | ||
1179 | lo = nfsi->layout; | ||
1180 | if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) | ||
1181 | sleep = true; | ||
1182 | spin_unlock(&ino->i_lock); | ||
1183 | |||
1184 | if (sleep) | ||
1185 | rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); | ||
1186 | |||
1187 | return sleep; | ||
1188 | } | ||
1189 | |||
1175 | /* | 1190 | /* |
1176 | * Compare two layout segments for sorting into layout cache. | 1191 | * Compare two layout segments for sorting into layout cache. |
1177 | * We want to preferentially return RW over RO layouts, so ensure those | 1192 | * We want to preferentially return RW over RO layouts, so ensure those |
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 78c9351ff117..d1990e90e7a0 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
@@ -270,6 +270,7 @@ bool pnfs_roc(struct inode *ino); | |||
270 | void pnfs_roc_release(struct inode *ino); | 270 | void pnfs_roc_release(struct inode *ino); |
271 | void pnfs_roc_set_barrier(struct inode *ino, u32 barrier); | 271 | void pnfs_roc_set_barrier(struct inode *ino, u32 barrier); |
272 | void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier); | 272 | void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier); |
273 | bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task); | ||
273 | void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t); | 274 | void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t); |
274 | void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); | 275 | void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); |
275 | int pnfs_layoutcommit_inode(struct inode *inode, bool sync); | 276 | int pnfs_layoutcommit_inode(struct inode *inode, bool sync); |
@@ -639,6 +640,12 @@ pnfs_roc_get_barrier(struct inode *ino, u32 *barrier) | |||
639 | { | 640 | { |
640 | } | 641 | } |
641 | 642 | ||
643 | static inline bool | ||
644 | pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task) | ||
645 | { | ||
646 | return false; | ||
647 | } | ||
648 | |||
642 | static inline void set_pnfs_layoutdriver(struct nfs_server *s, | 649 | static inline void set_pnfs_layoutdriver(struct nfs_server *s, |
643 | const struct nfs_fh *mntfh, u32 id) | 650 | const struct nfs_fh *mntfh, u32 id) |
644 | { | 651 | { |
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index ae0ff7a11b40..01b8cc8e8cfc 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -72,6 +72,9 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio) | |||
72 | { | 72 | { |
73 | struct nfs_pgio_mirror *mirror; | 73 | struct nfs_pgio_mirror *mirror; |
74 | 74 | ||
75 | if (pgio->pg_ops && pgio->pg_ops->pg_cleanup) | ||
76 | pgio->pg_ops->pg_cleanup(pgio); | ||
77 | |||
75 | pgio->pg_ops = &nfs_pgio_rw_ops; | 78 | pgio->pg_ops = &nfs_pgio_rw_ops; |
76 | 79 | ||
77 | /* read path should never have more than one mirror */ | 80 | /* read path should never have more than one mirror */ |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 388f48079c43..75ab7622e0cc 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -569,19 +569,17 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, | |||
569 | if (!nfs_pageio_add_request(pgio, req)) { | 569 | if (!nfs_pageio_add_request(pgio, req)) { |
570 | nfs_redirty_request(req); | 570 | nfs_redirty_request(req); |
571 | ret = pgio->pg_error; | 571 | ret = pgio->pg_error; |
572 | } | 572 | } else |
573 | nfs_add_stats(page_file_mapping(page)->host, | ||
574 | NFSIOS_WRITEPAGES, 1); | ||
573 | out: | 575 | out: |
574 | return ret; | 576 | return ret; |
575 | } | 577 | } |
576 | 578 | ||
577 | static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) | 579 | static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) |
578 | { | 580 | { |
579 | struct inode *inode = page_file_mapping(page)->host; | ||
580 | int ret; | 581 | int ret; |
581 | 582 | ||
582 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); | ||
583 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); | ||
584 | |||
585 | nfs_pageio_cond_complete(pgio, page_file_index(page)); | 583 | nfs_pageio_cond_complete(pgio, page_file_index(page)); |
586 | ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); | 584 | ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); |
587 | if (ret == -EAGAIN) { | 585 | if (ret == -EAGAIN) { |
@@ -597,9 +595,11 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st | |||
597 | static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) | 595 | static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) |
598 | { | 596 | { |
599 | struct nfs_pageio_descriptor pgio; | 597 | struct nfs_pageio_descriptor pgio; |
598 | struct inode *inode = page_file_mapping(page)->host; | ||
600 | int err; | 599 | int err; |
601 | 600 | ||
602 | nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc), | 601 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); |
602 | nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), | ||
603 | false, &nfs_async_write_completion_ops); | 603 | false, &nfs_async_write_completion_ops); |
604 | err = nfs_do_writepage(page, wbc, &pgio); | 604 | err = nfs_do_writepage(page, wbc, &pgio); |
605 | nfs_pageio_complete(&pgio); | 605 | nfs_pageio_complete(&pgio); |
@@ -1223,7 +1223,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino | |||
1223 | return 1; | 1223 | return 1; |
1224 | if (!flctx || (list_empty_careful(&flctx->flc_flock) && | 1224 | if (!flctx || (list_empty_careful(&flctx->flc_flock) && |
1225 | list_empty_careful(&flctx->flc_posix))) | 1225 | list_empty_careful(&flctx->flc_posix))) |
1226 | return 0; | 1226 | return 1; |
1227 | 1227 | ||
1228 | /* Check to see if there are whole file write locks */ | 1228 | /* Check to see if there are whole file write locks */ |
1229 | ret = 0; | 1229 | ret = 0; |
@@ -1351,6 +1351,9 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio) | |||
1351 | { | 1351 | { |
1352 | struct nfs_pgio_mirror *mirror; | 1352 | struct nfs_pgio_mirror *mirror; |
1353 | 1353 | ||
1354 | if (pgio->pg_ops && pgio->pg_ops->pg_cleanup) | ||
1355 | pgio->pg_ops->pg_cleanup(pgio); | ||
1356 | |||
1354 | pgio->pg_ops = &nfs_pgio_rw_ops; | 1357 | pgio->pg_ops = &nfs_pgio_rw_ops; |
1355 | 1358 | ||
1356 | nfs_pageio_stop_mirroring(pgio); | 1359 | nfs_pageio_stop_mirroring(pgio); |
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index cdefaa331a07..c29d9421bd5e 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c | |||
@@ -56,14 +56,6 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, | |||
56 | u32 device_generation = 0; | 56 | u32 device_generation = 0; |
57 | int error; | 57 | int error; |
58 | 58 | ||
59 | /* | ||
60 | * We do not attempt to support I/O smaller than the fs block size, | ||
61 | * or not aligned to it. | ||
62 | */ | ||
63 | if (args->lg_minlength < block_size) { | ||
64 | dprintk("pnfsd: I/O too small\n"); | ||
65 | goto out_layoutunavailable; | ||
66 | } | ||
67 | if (seg->offset & (block_size - 1)) { | 59 | if (seg->offset & (block_size - 1)) { |
68 | dprintk("pnfsd: I/O misaligned\n"); | 60 | dprintk("pnfsd: I/O misaligned\n"); |
69 | goto out_layoutunavailable; | 61 | goto out_layoutunavailable; |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 46b8b2bbc95a..ee5aa4daaea0 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -1439,6 +1439,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, | |||
1439 | int found, ret; | 1439 | int found, ret; |
1440 | int set_maybe; | 1440 | int set_maybe; |
1441 | int dispatch_assert = 0; | 1441 | int dispatch_assert = 0; |
1442 | int dispatched = 0; | ||
1442 | 1443 | ||
1443 | if (!dlm_grab(dlm)) | 1444 | if (!dlm_grab(dlm)) |
1444 | return DLM_MASTER_RESP_NO; | 1445 | return DLM_MASTER_RESP_NO; |
@@ -1658,15 +1659,18 @@ send_response: | |||
1658 | mlog(ML_ERROR, "failed to dispatch assert master work\n"); | 1659 | mlog(ML_ERROR, "failed to dispatch assert master work\n"); |
1659 | response = DLM_MASTER_RESP_ERROR; | 1660 | response = DLM_MASTER_RESP_ERROR; |
1660 | dlm_lockres_put(res); | 1661 | dlm_lockres_put(res); |
1661 | } else | 1662 | } else { |
1663 | dispatched = 1; | ||
1662 | __dlm_lockres_grab_inflight_worker(dlm, res); | 1664 | __dlm_lockres_grab_inflight_worker(dlm, res); |
1665 | } | ||
1663 | spin_unlock(&res->spinlock); | 1666 | spin_unlock(&res->spinlock); |
1664 | } else { | 1667 | } else { |
1665 | if (res) | 1668 | if (res) |
1666 | dlm_lockres_put(res); | 1669 | dlm_lockres_put(res); |
1667 | } | 1670 | } |
1668 | 1671 | ||
1669 | dlm_put(dlm); | 1672 | if (!dispatched) |
1673 | dlm_put(dlm); | ||
1670 | return response; | 1674 | return response; |
1671 | } | 1675 | } |
1672 | 1676 | ||
@@ -2090,7 +2094,6 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, | |||
2090 | 2094 | ||
2091 | 2095 | ||
2092 | /* queue up work for dlm_assert_master_worker */ | 2096 | /* queue up work for dlm_assert_master_worker */ |
2093 | dlm_grab(dlm); /* get an extra ref for the work item */ | ||
2094 | dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); | 2097 | dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); |
2095 | item->u.am.lockres = res; /* already have a ref */ | 2098 | item->u.am.lockres = res; /* already have a ref */ |
2096 | /* can optionally ignore node numbers higher than this node */ | 2099 | /* can optionally ignore node numbers higher than this node */ |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index ce12e0b1a31f..3d90ad7ff91f 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -1694,6 +1694,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, | |||
1694 | unsigned int hash; | 1694 | unsigned int hash; |
1695 | int master = DLM_LOCK_RES_OWNER_UNKNOWN; | 1695 | int master = DLM_LOCK_RES_OWNER_UNKNOWN; |
1696 | u32 flags = DLM_ASSERT_MASTER_REQUERY; | 1696 | u32 flags = DLM_ASSERT_MASTER_REQUERY; |
1697 | int dispatched = 0; | ||
1697 | 1698 | ||
1698 | if (!dlm_grab(dlm)) { | 1699 | if (!dlm_grab(dlm)) { |
1699 | /* since the domain has gone away on this | 1700 | /* since the domain has gone away on this |
@@ -1719,8 +1720,10 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, | |||
1719 | dlm_put(dlm); | 1720 | dlm_put(dlm); |
1720 | /* sender will take care of this and retry */ | 1721 | /* sender will take care of this and retry */ |
1721 | return ret; | 1722 | return ret; |
1722 | } else | 1723 | } else { |
1724 | dispatched = 1; | ||
1723 | __dlm_lockres_grab_inflight_worker(dlm, res); | 1725 | __dlm_lockres_grab_inflight_worker(dlm, res); |
1726 | } | ||
1724 | spin_unlock(&res->spinlock); | 1727 | spin_unlock(&res->spinlock); |
1725 | } else { | 1728 | } else { |
1726 | /* put.. incase we are not the master */ | 1729 | /* put.. incase we are not the master */ |
@@ -1730,7 +1733,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, | |||
1730 | } | 1733 | } |
1731 | spin_unlock(&dlm->spinlock); | 1734 | spin_unlock(&dlm->spinlock); |
1732 | 1735 | ||
1733 | dlm_put(dlm); | 1736 | if (!dispatched) |
1737 | dlm_put(dlm); | ||
1734 | return master; | 1738 | return master; |
1735 | } | 1739 | } |
1736 | 1740 | ||
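Both ocfs2/dlm hunks above apply the same reference-counting rule: the handler pins the domain with dlm_grab() on entry, and once the assert-master work item has been queued that reference is handed over to the worker (note that dlm_dispatch_assert_master() no longer takes an extra one), so the handler may call dlm_put() only on the paths where nothing was dispatched. A minimal standalone sketch of the pattern, using illustrative names rather than the kernel's:

/* Sketch only: a reference is transferred to a worker when dispatch
 * succeeds, so the caller drops it only when dispatch did not happen. */
#include <stdio.h>
#include <stdbool.h>

struct domain { int refs; };

static void grab(struct domain *d) { d->refs++; }
static void put(struct domain *d)  { d->refs--; }

static bool dispatch_worker(struct domain *d, bool fail)
{
	if (fail)
		return false;		/* nothing queued, caller keeps its ref */
	return true;			/* the queued worker now owns the ref */
}

static void handler(struct domain *d, bool fail)
{
	bool dispatched = false;

	grab(d);			/* pin the domain for this handler */
	if (dispatch_worker(d, fail))
		dispatched = true;	/* ref transferred to the worker */

	if (!dispatched)
		put(d);			/* drop it only if we still own it */
}

int main(void)
{
	struct domain d = { .refs = 0 };

	handler(&d, true);		/* failed dispatch: handler drops its ref */
	printf("after failed dispatch: refs=%d\n", d.refs);
	handler(&d, false);		/* successful dispatch: worker holds one */
	printf("after successful dispatch: refs=%d\n", d.refs);
	put(&d);			/* what the worker would eventually do */
	return 0;
}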
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index ba1323a94924..a586467f6ff6 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c | |||
@@ -70,6 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) | |||
70 | unsigned order; | 70 | unsigned order; |
71 | void *data; | 71 | void *data; |
72 | int ret; | 72 | int ret; |
73 | gfp_t gfp = mapping_gfp_mask(inode->i_mapping); | ||
73 | 74 | ||
74 | /* make various checks */ | 75 | /* make various checks */ |
75 | order = get_order(newsize); | 76 | order = get_order(newsize); |
@@ -84,7 +85,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) | |||
84 | 85 | ||
85 | /* allocate enough contiguous pages to be able to satisfy the | 86 | /* allocate enough contiguous pages to be able to satisfy the |
86 | * request */ | 87 | * request */ |
87 | pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order); | 88 | pages = alloc_pages(gfp, order); |
88 | if (!pages) | 89 | if (!pages) |
89 | return -ENOMEM; | 90 | return -ENOMEM; |
90 | 91 | ||
@@ -108,7 +109,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) | |||
108 | struct page *page = pages + loop; | 109 | struct page *page = pages + loop; |
109 | 110 | ||
110 | ret = add_to_page_cache_lru(page, inode->i_mapping, loop, | 111 | ret = add_to_page_cache_lru(page, inode->i_mapping, loop, |
111 | GFP_KERNEL); | 112 | gfp); |
112 | if (ret < 0) | 113 | if (ret < 0) |
113 | goto add_error; | 114 | goto add_error; |
114 | 115 | ||
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 96f3448b6eb4..fd65b3f1923c 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c | |||
@@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode, | |||
652 | { | 652 | { |
653 | int err; | 653 | int err; |
654 | 654 | ||
655 | mutex_lock(&inode->i_mutex); | ||
656 | err = security_inode_init_security(inode, dentry, qstr, | 655 | err = security_inode_init_security(inode, dentry, qstr, |
657 | &init_xattrs, 0); | 656 | &init_xattrs, 0); |
658 | mutex_unlock(&inode->i_mutex); | ||
659 | |||
660 | if (err) { | 657 | if (err) { |
661 | struct ubifs_info *c = dentry->i_sb->s_fs_info; | 658 | struct ubifs_info *c = dentry->i_sb->s_fs_info; |
662 | ubifs_err(c, "cannot initialize security for inode %lu, error %d", | 659 | ubifs_err(c, "cannot initialize security for inode %lu, error %d", |
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 634e676072cb..50311703135b 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -467,8 +467,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file) | |||
467 | * the fault_*wqh. | 467 | * the fault_*wqh. |
468 | */ | 468 | */ |
469 | spin_lock(&ctx->fault_pending_wqh.lock); | 469 | spin_lock(&ctx->fault_pending_wqh.lock); |
470 | __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, &range); | 470 | __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range); |
471 | __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, &range); | 471 | __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range); |
472 | spin_unlock(&ctx->fault_pending_wqh.lock); | 472 | spin_unlock(&ctx->fault_pending_wqh.lock); |
473 | 473 | ||
474 | wake_up_poll(&ctx->fd_wqh, POLLHUP); | 474 | wake_up_poll(&ctx->fd_wqh, POLLHUP); |
@@ -650,10 +650,10 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx, | |||
650 | spin_lock(&ctx->fault_pending_wqh.lock); | 650 | spin_lock(&ctx->fault_pending_wqh.lock); |
651 | /* wake all in the range and autoremove */ | 651 | /* wake all in the range and autoremove */ |
652 | if (waitqueue_active(&ctx->fault_pending_wqh)) | 652 | if (waitqueue_active(&ctx->fault_pending_wqh)) |
653 | __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, | 653 | __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, |
654 | range); | 654 | range); |
655 | if (waitqueue_active(&ctx->fault_wqh)) | 655 | if (waitqueue_active(&ctx->fault_wqh)) |
656 | __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, range); | 656 | __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range); |
657 | spin_unlock(&ctx->fault_pending_wqh.lock); | 657 | spin_unlock(&ctx->fault_pending_wqh.lock); |
658 | } | 658 | } |
659 | 659 | ||
@@ -1287,8 +1287,10 @@ static struct file *userfaultfd_file_create(int flags) | |||
1287 | 1287 | ||
1288 | file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx, | 1288 | file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx, |
1289 | O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); | 1289 | O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); |
1290 | if (IS_ERR(file)) | 1290 | if (IS_ERR(file)) { |
1291 | mmput(ctx->mm); | ||
1291 | kmem_cache_free(userfaultfd_ctx_cachep, ctx); | 1292 | kmem_cache_free(userfaultfd_ctx_cachep, ctx); |
1293 | } | ||
1292 | out: | 1294 | out: |
1293 | return file; | 1295 | return file; |
1294 | } | 1296 | } |
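The userfaultfd hunks carry an API update and a leak fix: __wake_up_locked_key() has dropped its nr_exclusive argument, and when anon_inode_getfile() fails the mm reference taken for the context is now released before the context itself is freed. A standalone sketch of that unwind-on-error rule, with stand-in names rather than the kernel's:

/* Sketch only: everything acquired before the failing step is undone,
 * in reverse order, on the error path. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct mm { int refs; };
struct ctx { struct mm *mm; };

static void mm_get(struct mm *mm) { mm->refs++; }
static void mm_put(struct mm *mm) { mm->refs--; }

static struct ctx *create_ctx(struct mm *mm, bool fail_file_creation)
{
	struct ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return NULL;
	ctx->mm = mm;
	mm_get(mm);			/* the context pins the mm */

	if (fail_file_creation) {	/* e.g. the file could not be created */
		mm_put(ctx->mm);	/* undo the pin ... */
		free(ctx);		/* ... and the allocation */
		return NULL;
	}
	return ctx;
}

int main(void)
{
	struct mm mm = { .refs = 1 };

	if (!create_ctx(&mm, true))
		printf("creation failed, mm refs back to %d\n", mm.refs);
	return 0;
}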
diff --git a/include/acpi/button.h b/include/acpi/button.h index 97eea0e4c016..1cad8b2d460c 100644 --- a/include/acpi/button.h +++ b/include/acpi/button.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/notifier.h> | 4 | #include <linux/notifier.h> |
5 | 5 | ||
6 | #if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) | 6 | #if IS_ENABLED(CONFIG_ACPI_BUTTON) |
7 | extern int acpi_lid_notifier_register(struct notifier_block *nb); | 7 | extern int acpi_lid_notifier_register(struct notifier_block *nb); |
8 | extern int acpi_lid_notifier_unregister(struct notifier_block *nb); | 8 | extern int acpi_lid_notifier_unregister(struct notifier_block *nb); |
9 | extern int acpi_lid_open(void); | 9 | extern int acpi_lid_open(void); |
@@ -20,6 +20,6 @@ static inline int acpi_lid_open(void) | |||
20 | { | 20 | { |
21 | return 1; | 21 | return 1; |
22 | } | 22 | } |
23 | #endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */ | 23 | #endif /* IS_ENABLED(CONFIG_ACPI_BUTTON) */ |
24 | 24 | ||
25 | #endif /* ACPI_BUTTON_H */ | 25 | #endif /* ACPI_BUTTON_H */ |
diff --git a/include/acpi/video.h b/include/acpi/video.h index e840b294c6f5..c62392d9b52a 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h | |||
@@ -24,7 +24,7 @@ enum acpi_backlight_type { | |||
24 | acpi_backlight_native, | 24 | acpi_backlight_native, |
25 | }; | 25 | }; |
26 | 26 | ||
27 | #if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) | 27 | #if IS_ENABLED(CONFIG_ACPI_VIDEO) |
28 | extern int acpi_video_register(void); | 28 | extern int acpi_video_register(void); |
29 | extern void acpi_video_unregister(void); | 29 | extern void acpi_video_unregister(void); |
30 | extern int acpi_video_get_edid(struct acpi_device *device, int type, | 30 | extern int acpi_video_get_edid(struct acpi_device *device, int type, |
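Both ACPI headers switch from open-coded CONFIG_FOO/CONFIG_FOO_MODULE tests to IS_ENABLED(), which is true whether the option is built in or built as a module. A standalone demo of a simplified version of the preprocessor trick behind it; the real definitions live in <linux/kconfig.h>:

/* Simplified re-creation of IS_ENABLED(), for illustration only. */
#include <stdio.h>

/* Pretend Kconfig produced this (the driver is built as a module, =m). */
#define CONFIG_ACPI_BUTTON_MODULE 1

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_BUILTIN(option) __is_defined(option)
#define IS_MODULE(option)  __is_defined(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

int main(void)
{
	/* True for both =y and =m, which is why the #if in button.h and
	 * video.h no longer needs the explicit ..._MODULE check. */
	printf("IS_ENABLED(CONFIG_ACPI_BUTTON) = %d\n",
	       IS_ENABLED(CONFIG_ACPI_BUTTON));
	return 0;
}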
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index f20f407ce45d..4b4b056a6eb0 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h | |||
@@ -73,7 +73,7 @@ | |||
73 | * Convert a physical address to a Page Frame Number and back | 73 | * Convert a physical address to a Page Frame Number and back |
74 | */ | 74 | */ |
75 | #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) | 75 | #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) |
76 | #define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) | 76 | #define __pfn_to_phys(pfn) PFN_PHYS(pfn) |
77 | 77 | ||
78 | #define page_to_pfn __page_to_pfn | 78 | #define page_to_pfn __page_to_pfn |
79 | #define pfn_to_page __pfn_to_page | 79 | #define pfn_to_page __pfn_to_page |
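Using PFN_PHYS() inside __pfn_to_phys() means the PFN is cast to phys_addr_t before the shift, so a 32-bit kernel with a 64-bit physical address type (PAE/LPAE) no longer truncates frames at or above the 4 GiB mark. A standalone demo of the difference, with stand-in types:

/* Sketch only: stand-in types model a 32-bit unsigned long together with
 * 64-bit physical addresses; the kernel's PFN_PHYS() is in <linux/pfn.h>. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;		/* 64-bit physical addresses ... */
typedef uint32_t kernel_ulong_t;	/* ... on a 32-bit unsigned long */

#define PFN_PHYS(x)  ((phys_addr_t)(x) << PAGE_SHIFT)	/* cast, then shift */
#define OLD_STYLE(x) ((phys_addr_t)((kernel_ulong_t)(x) << PAGE_SHIFT))

int main(void)
{
	kernel_ulong_t pfn = 0x100000;	/* page frame sitting at 4 GiB */

	printf("shift in unsigned long: %#" PRIx64 "\n", OLD_STYLE(pfn));	/* truncated to 0 */
	printf("PFN_PHYS():             %#" PRIx64 "\n", PFN_PHYS(pfn));	/* 0x100000000 */
	return 0;
}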
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 83bfb87f5bf1..e2aadbc7151f 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h | |||
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock) | |||
111 | cpu_relax(); | 111 | cpu_relax(); |
112 | } | 112 | } |
113 | 113 | ||
114 | #ifndef virt_queued_spin_lock | 114 | #ifndef virt_spin_lock |
115 | static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock) | 115 | static __always_inline bool virt_spin_lock(struct qspinlock *lock) |
116 | { | 116 | { |
117 | return false; | 117 | return false; |
118 | } | 118 | } |
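The qspinlock hook is only renamed here (virt_queued_spin_lock() becomes virt_spin_lock()); the generic fallback still returns false, so the native queued slowpath is used. An architecture that wants simple spinning under a hypervisor overrides it roughly along these lines; this is a hypothetical, kernel-style fragment (not standalone), and the two helpers named in it are made up for illustration:

#define virt_spin_lock virt_spin_lock	/* opt out of the generic fallback */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!running_under_hypervisor())	/* hypothetical check */
		return false;		/* fall back to the native queued slowpath */

	/* spin on the lock word directly while virtualized */
	while (!simple_trylock(lock))		/* hypothetical helper */
		cpu_relax();
	return true;
}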
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 94f9ea8abcae..011dde083f23 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h | |||
@@ -1,15 +1,10 @@ | |||
1 | #ifndef _ASM_WORD_AT_A_TIME_H | 1 | #ifndef _ASM_WORD_AT_A_TIME_H |
2 | #define _ASM_WORD_AT_A_TIME_H | 2 | #define _ASM_WORD_AT_A_TIME_H |
3 | 3 | ||
4 | /* | ||
5 | * This says "generic", but it's actually big-endian only. | ||
6 | * Little-endian can use more efficient versions of these | ||
7 | * interfaces, see for example | ||
8 | * arch/x86/include/asm/word-at-a-time.h | ||
9 | * for those. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <asm/byteorder.h> | ||
6 | |||
7 | #ifdef __BIG_ENDIAN | ||
13 | 8 | ||
14 | struct word_at_a_time { | 9 | struct word_at_a_time { |
15 | const unsigned long high_bits, low_bits; | 10 | const unsigned long high_bits, low_bits; |
@@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct | |||
53 | #define zero_bytemask(mask) (~1ul << __fls(mask)) | 48 | #define zero_bytemask(mask) (~1ul << __fls(mask)) |
54 | #endif | 49 | #endif |
55 | 50 | ||
51 | #else | ||
52 | |||
53 | /* | ||
54 | * The optimal byte mask counting is probably going to be something | ||
55 | * that is architecture-specific. If you have a reliably fast | ||
56 | * bit count instruction, that might be better than the multiply | ||
57 | * and shift, for example. | ||
58 | */ | ||
59 | struct word_at_a_time { | ||
60 | const unsigned long one_bits, high_bits; | ||
61 | }; | ||
62 | |||
63 | #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } | ||
64 | |||
65 | #ifdef CONFIG_64BIT | ||
66 | |||
67 | /* | ||
68 | * Jan Achrenius on G+: microoptimized version of | ||
69 | * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" | ||
70 | * that works for the bytemasks without having to | ||
71 | * mask them first. | ||
72 | */ | ||
73 | static inline long count_masked_bytes(unsigned long mask) | ||
74 | { | ||
75 | return mask*0x0001020304050608ul >> 56; | ||
76 | } | ||
77 | |||
78 | #else /* 32-bit case */ | ||
79 | |||
80 | /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ | ||
81 | static inline long count_masked_bytes(long mask) | ||
82 | { | ||
83 | /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ | ||
84 | long a = (0x0ff0001+mask) >> 23; | ||
85 | /* Fix the 1 for 00 case */ | ||
86 | return a & mask; | ||
87 | } | ||
88 | |||
89 | #endif | ||
90 | |||
91 | /* Return nonzero if it has a zero */ | ||
92 | static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) | ||
93 | { | ||
94 | unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; | ||
95 | *bits = mask; | ||
96 | return mask; | ||
97 | } | ||
98 | |||
99 | static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) | ||
100 | { | ||
101 | return bits; | ||
102 | } | ||
103 | |||
104 | static inline unsigned long create_zero_mask(unsigned long bits) | ||
105 | { | ||
106 | bits = (bits - 1) & ~bits; | ||
107 | return bits >> 7; | ||
108 | } | ||
109 | |||
110 | /* The mask we created is directly usable as a bytemask */ | ||
111 | #define zero_bytemask(mask) (mask) | ||
112 | |||
113 | static inline unsigned long find_zero(unsigned long mask) | ||
114 | { | ||
115 | return count_masked_bytes(mask); | ||
116 | } | ||
117 | |||
118 | #endif /* __BIG_ENDIAN */ | ||
119 | |||
56 | #endif /* _ASM_WORD_AT_A_TIME_H */ | 120 | #endif /* _ASM_WORD_AT_A_TIME_H */ |
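The generic word-at-a-time header now carries a little-endian variant next to the big-endian one, selected via <asm/byteorder.h>. A standalone 64-bit little-endian demo of the new helpers locating the first zero byte in an 8-byte word; REPEAT_BYTE is a stand-in for the kernel macro, and the helper bodies are copied from the hunk above:

/* Assumes a 64-bit little-endian host; for demonstration only. */
#include <stdio.h>
#include <string.h>

#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};
#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }

static inline long count_masked_bytes(unsigned long mask)
{
	return mask * 0x0001020304050608ul >> 56;	/* 64-bit version */
}

static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
				     const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}

static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}

int main(void)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long word, bits;

	memcpy(&word, "abc\0defg", sizeof(word));	/* NUL at offset 3 */

	if (has_zero(word, &bits, &constants))
		printf("first zero byte at offset %lu\n",
		       find_zero(create_zero_mask(bits)));
	return 0;
}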
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 2a747a91fded..3febb4b9fce9 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h | |||
@@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); | |||
240 | 240 | ||
241 | extern void drm_kms_helper_poll_disable(struct drm_device *dev); | 241 | extern void drm_kms_helper_poll_disable(struct drm_device *dev); |
242 | extern void drm_kms_helper_poll_enable(struct drm_device *dev); | 242 | extern void drm_kms_helper_poll_enable(struct drm_device *dev); |
243 | extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); | ||
243 | 244 | ||
244 | #endif | 245 | #endif |
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 499e9f625aef..0212d139a480 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -568,6 +568,10 @@ | |||
568 | #define MODE_I2C_READ 4 | 568 | #define MODE_I2C_READ 4 |
569 | #define MODE_I2C_STOP 8 | 569 | #define MODE_I2C_STOP 8 |
570 | 570 | ||
571 | /* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ | ||
572 | #define DP_MST_PHYSICAL_PORT_0 0 | ||
573 | #define DP_MST_LOGICAL_PORT_0 8 | ||
574 | |||
571 | #define DP_LINK_STATUS_SIZE 6 | 575 | #define DP_LINK_STATUS_SIZE 6 |
572 | bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], | 576 | bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], |
573 | int lane_count); | 577 | int lane_count); |
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 86d0b25ed054..5340099741ae 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h | |||
@@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write { | |||
253 | u8 *bytes; | 253 | u8 *bytes; |
254 | }; | 254 | }; |
255 | 255 | ||
256 | #define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 | ||
256 | struct drm_dp_remote_i2c_read { | 257 | struct drm_dp_remote_i2c_read { |
257 | u8 num_transactions; | 258 | u8 num_transactions; |
258 | u8 port_number; | 259 | u8 port_number; |
@@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read { | |||
262 | u8 *bytes; | 263 | u8 *bytes; |
263 | u8 no_stop_bit; | 264 | u8 no_stop_bit; |
264 | u8 i2c_transaction_delay; | 265 | u8 i2c_transaction_delay; |
265 | } transactions[4]; | 266 | } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; |
266 | u8 read_i2c_device_id; | 267 | u8 read_i2c_device_id; |
267 | u8 num_bytes_read; | 268 | u8 num_bytes_read; |
268 | }; | 269 | }; |
@@ -374,6 +375,7 @@ struct drm_dp_mst_topology_mgr; | |||
374 | struct drm_dp_mst_topology_cbs { | 375 | struct drm_dp_mst_topology_cbs { |
375 | /* create a connector for a port */ | 376 | /* create a connector for a port */ |
376 | struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); | 377 | struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); |
378 | void (*register_connector)(struct drm_connector *connector); | ||
377 | void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, | 379 | void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, |
378 | struct drm_connector *connector); | 380 | struct drm_connector *connector); |
379 | void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); | 381 | void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index d901f1a47be6..4e14dac282bb 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -35,11 +35,7 @@ | |||
35 | #define VGIC_V3_MAX_LRS 16 | 35 | #define VGIC_V3_MAX_LRS 16 |
36 | #define VGIC_MAX_IRQS 1024 | 36 | #define VGIC_MAX_IRQS 1024 |
37 | #define VGIC_V2_MAX_CPUS 8 | 37 | #define VGIC_V2_MAX_CPUS 8 |
38 | 38 | #define VGIC_V3_MAX_CPUS 255 | |
39 | /* Sanity checks... */ | ||
40 | #if (KVM_MAX_VCPUS > 255) | ||
41 | #error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now | ||
42 | #endif | ||
43 | 39 | ||
44 | #if (VGIC_NR_IRQS_LEGACY & 31) | 40 | #if (VGIC_NR_IRQS_LEGACY & 31) |
45 | #error "VGIC_NR_IRQS must be a multiple of 32" | 41 | #error "VGIC_NR_IRQS must be a multiple of 32" |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7235c4851460..43856d19cf4d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -217,6 +217,7 @@ struct pci_dev; | |||
217 | 217 | ||
218 | int acpi_pci_irq_enable (struct pci_dev *dev); | 218 | int acpi_pci_irq_enable (struct pci_dev *dev); |
219 | void acpi_penalize_isa_irq(int irq, int active); | 219 | void acpi_penalize_isa_irq(int irq, int active); |
220 | bool acpi_isa_irq_available(int irq); | ||
220 | void acpi_penalize_sci_irq(int irq, int trigger, int polarity); | 221 | void acpi_penalize_sci_irq(int irq, int trigger, int polarity); |
221 | void acpi_pci_irq_disable (struct pci_dev *dev); | 222 | void acpi_pci_irq_disable (struct pci_dev *dev); |
222 | 223 | ||
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 5a5d79ee256f..d5eb4ad1c534 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/blkdev.h> | 14 | #include <linux/blkdev.h> |
15 | #include <linux/writeback.h> | 15 | #include <linux/writeback.h> |
16 | #include <linux/memcontrol.h> | ||
16 | #include <linux/blk-cgroup.h> | 17 | #include <linux/blk-cgroup.h> |
17 | #include <linux/backing-dev-defs.h> | 18 | #include <linux/backing-dev-defs.h> |
18 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
@@ -252,13 +253,19 @@ int inode_congested(struct inode *inode, int cong_bits); | |||
252 | * @inode: inode of interest | 253 | * @inode: inode of interest |
253 | * | 254 | * |
254 | * cgroup writeback requires support from both the bdi and filesystem. | 255 | * cgroup writeback requires support from both the bdi and filesystem. |
255 | * Test whether @inode has both. | 256 | * Also, both memcg and iocg have to be on the default hierarchy. Test |
257 | * whether all conditions are met. | ||
258 | * | ||
259 | * Note that the test result may change dynamically on the same inode | ||
260 | * depending on how memcg and iocg are configured. | ||
256 | */ | 261 | */ |
257 | static inline bool inode_cgwb_enabled(struct inode *inode) | 262 | static inline bool inode_cgwb_enabled(struct inode *inode) |
258 | { | 263 | { |
259 | struct backing_dev_info *bdi = inode_to_bdi(inode); | 264 | struct backing_dev_info *bdi = inode_to_bdi(inode); |
260 | 265 | ||
261 | return bdi_cap_account_dirty(bdi) && | 266 | return cgroup_on_dfl(mem_cgroup_root_css->cgroup) && |
267 | cgroup_on_dfl(blkcg_root_css->cgroup) && | ||
268 | bdi_cap_account_dirty(bdi) && | ||
262 | (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && | 269 | (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && |
263 | (inode->i_sb->s_iflags & SB_I_CGROUPWB); | 270 | (inode->i_sb->s_iflags & SB_I_CGROUPWB); |
264 | } | 271 | } |
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 37d1602c4f7a..5e7d43ab61c0 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -145,7 +145,6 @@ enum { | |||
145 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, | 145 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, |
146 | BLK_MQ_F_TAG_SHARED = 1 << 1, | 146 | BLK_MQ_F_TAG_SHARED = 1 << 1, |
147 | BLK_MQ_F_SG_MERGE = 1 << 2, | 147 | BLK_MQ_F_SG_MERGE = 1 << 2, |
148 | BLK_MQ_F_SYSFS_UP = 1 << 3, | ||
149 | BLK_MQ_F_DEFER_ISSUE = 1 << 4, | 148 | BLK_MQ_F_DEFER_ISSUE = 1 << 4, |
150 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, | 149 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, |
151 | BLK_MQ_F_ALLOC_POLICY_BITS = 1, | 150 | BLK_MQ_F_ALLOC_POLICY_BITS = 1, |
@@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); | |||
215 | void blk_mq_cancel_requeue_work(struct request_queue *q); | 214 | void blk_mq_cancel_requeue_work(struct request_queue *q); |
216 | void blk_mq_kick_requeue_list(struct request_queue *q); | 215 | void blk_mq_kick_requeue_list(struct request_queue *q); |
217 | void blk_mq_abort_requeue_list(struct request_queue *q); | 216 | void blk_mq_abort_requeue_list(struct request_queue *q); |
218 | void blk_mq_complete_request(struct request *rq); | 217 | void blk_mq_complete_request(struct request *rq, int error); |
219 | 218 | ||
220 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); | 219 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); |
221 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); | 220 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); |
@@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q); | |||
224 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); | 223 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); |
225 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); | 224 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); |
226 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); | 225 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); |
227 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, | ||
228 | void *priv); | ||
229 | void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, | 226 | void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, |
230 | void *priv); | 227 | void *priv); |
231 | void blk_mq_freeze_queue(struct request_queue *q); | 228 | void blk_mq_freeze_queue(struct request_queue *q); |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 38a5ff772a37..19c2e947d4d1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -456,6 +456,8 @@ struct request_queue { | |||
456 | struct blk_mq_tag_set *tag_set; | 456 | struct blk_mq_tag_set *tag_set; |
457 | struct list_head tag_set_list; | 457 | struct list_head tag_set_list; |
458 | struct bio_set *bio_split; | 458 | struct bio_set *bio_split; |
459 | |||
460 | bool mq_sysfs_init_done; | ||
459 | }; | 461 | }; |
460 | 462 | ||
461 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ | 463 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ |
@@ -1368,6 +1370,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q, | |||
1368 | ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); | 1370 | ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); |
1369 | } | 1371 | } |
1370 | 1372 | ||
1373 | static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, | ||
1374 | struct bio *next) | ||
1375 | { | ||
1376 | if (!bio_has_data(prev)) | ||
1377 | return false; | ||
1378 | |||
1379 | return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1], | ||
1380 | next->bi_io_vec[0].bv_offset); | ||
1381 | } | ||
1382 | |||
1383 | static inline bool req_gap_back_merge(struct request *req, struct bio *bio) | ||
1384 | { | ||
1385 | return bio_will_gap(req->q, req->biotail, bio); | ||
1386 | } | ||
1387 | |||
1388 | static inline bool req_gap_front_merge(struct request *req, struct bio *bio) | ||
1389 | { | ||
1390 | return bio_will_gap(req->q, bio, req->bio); | ||
1391 | } | ||
1392 | |||
1371 | struct work_struct; | 1393 | struct work_struct; |
1372 | int kblockd_schedule_work(struct work_struct *work); | 1394 | int kblockd_schedule_work(struct work_struct *work); |
1373 | int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); | 1395 | int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); |
@@ -1494,6 +1516,26 @@ queue_max_integrity_segments(struct request_queue *q) | |||
1494 | return q->limits.max_integrity_segments; | 1516 | return q->limits.max_integrity_segments; |
1495 | } | 1517 | } |
1496 | 1518 | ||
1519 | static inline bool integrity_req_gap_back_merge(struct request *req, | ||
1520 | struct bio *next) | ||
1521 | { | ||
1522 | struct bio_integrity_payload *bip = bio_integrity(req->bio); | ||
1523 | struct bio_integrity_payload *bip_next = bio_integrity(next); | ||
1524 | |||
1525 | return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], | ||
1526 | bip_next->bip_vec[0].bv_offset); | ||
1527 | } | ||
1528 | |||
1529 | static inline bool integrity_req_gap_front_merge(struct request *req, | ||
1530 | struct bio *bio) | ||
1531 | { | ||
1532 | struct bio_integrity_payload *bip = bio_integrity(bio); | ||
1533 | struct bio_integrity_payload *bip_next = bio_integrity(req->bio); | ||
1534 | |||
1535 | return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], | ||
1536 | bip_next->bip_vec[0].bv_offset); | ||
1537 | } | ||
1538 | |||
1497 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 1539 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
1498 | 1540 | ||
1499 | struct bio; | 1541 | struct bio; |
@@ -1560,6 +1602,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g) | |||
1560 | { | 1602 | { |
1561 | return 0; | 1603 | return 0; |
1562 | } | 1604 | } |
1605 | static inline bool integrity_req_gap_back_merge(struct request *req, | ||
1606 | struct bio *next) | ||
1607 | { | ||
1608 | return false; | ||
1609 | } | ||
1610 | static inline bool integrity_req_gap_front_merge(struct request *req, | ||
1611 | struct bio *bio) | ||
1612 | { | ||
1613 | return false; | ||
1614 | } | ||
1563 | 1615 | ||
1564 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 1616 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
1565 | 1617 | ||
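The new req_gap_back_merge()/req_gap_front_merge() helpers and their integrity counterparts all reduce to the same virt-boundary question: may two adjacent segments be merged without leaving a gap the device cannot DMA across. A standalone sketch of that rule with simplified stand-in types; the kernel's bvec_gap_to_prev() is the real (and slightly stricter) implementation:

/* Sketch only: segments merge cleanly when the previous one ends on the
 * device's virt boundary and the next one starts on it. */
#include <stdio.h>
#include <stdbool.h>

struct seg { unsigned int offset, len; };

static bool gap_between(unsigned long virt_boundary_mask,
			const struct seg *prev, const struct seg *next)
{
	if (!virt_boundary_mask)
		return false;		/* device has no such constraint */
	return (next->offset & virt_boundary_mask) ||
	       ((prev->offset + prev->len) & virt_boundary_mask);
}

int main(void)
{
	unsigned long mask = 0xfff;			/* 4 KiB boundary */
	struct seg a = { .offset = 0,   .len = 4096 };	/* ends on boundary */
	struct seg b = { .offset = 0,   .len = 512 };	/* starts on boundary */
	struct seg c = { .offset = 256, .len = 512 };	/* does not */

	printf("a->b gap? %d\n", gap_between(mask, &a, &b));	/* 0 */
	printf("a->c gap? %d\n", gap_between(mask, &a, &c));	/* 1 */
	return 0;
}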
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 4763ad64e832..f89b31d45cc8 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h | |||
@@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features) | |||
107 | CEPH_FEATURE_OSDMAP_ENC | \ | 107 | CEPH_FEATURE_OSDMAP_ENC | \ |
108 | CEPH_FEATURE_CRUSH_TUNABLES3 | \ | 108 | CEPH_FEATURE_CRUSH_TUNABLES3 | \ |
109 | CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ | 109 | CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ |
110 | CEPH_FEATURE_MSGR_KEEPALIVE2 | \ | ||
110 | CEPH_FEATURE_CRUSH_V4) | 111 | CEPH_FEATURE_CRUSH_V4) |
111 | 112 | ||
112 | #define CEPH_FEATURES_REQUIRED_DEFAULT \ | 113 | #define CEPH_FEATURES_REQUIRED_DEFAULT \ |
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 7e1252e97a30..b2371d9b51fa 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h | |||
@@ -238,6 +238,8 @@ struct ceph_connection { | |||
238 | bool out_kvec_is_msg; /* kvec refers to out_msg */ | 238 | bool out_kvec_is_msg; /* kvec refers to out_msg */ |
239 | int out_more; /* there is more data after the kvecs */ | 239 | int out_more; /* there is more data after the kvecs */ |
240 | __le64 out_temp_ack; /* for writing an ack */ | 240 | __le64 out_temp_ack; /* for writing an ack */ |
241 | struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 | ||
242 | stamp */ | ||
241 | 243 | ||
242 | /* message in temps */ | 244 | /* message in temps */ |
243 | struct ceph_msg_header in_hdr; | 245 | struct ceph_msg_header in_hdr; |
@@ -248,7 +250,7 @@ struct ceph_connection { | |||
248 | int in_base_pos; /* bytes read */ | 250 | int in_base_pos; /* bytes read */ |
249 | __le64 in_temp_ack; /* for reading an ack */ | 251 | __le64 in_temp_ack; /* for reading an ack */ |
250 | 252 | ||
251 | struct timespec last_keepalive_ack; | 253 | struct timespec last_keepalive_ack; /* keepalive2 ack stamp */ |
252 | 254 | ||
253 | struct delayed_work work; /* send|recv work */ | 255 | struct delayed_work work; /* send|recv work */ |
254 | unsigned long delay; /* current delay interval */ | 256 | unsigned long delay; /* current delay interval */ |
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 4d8fcf2187dc..8492721b39be 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
@@ -473,31 +473,8 @@ struct cgroup_subsys { | |||
473 | unsigned int depends_on; | 473 | unsigned int depends_on; |
474 | }; | 474 | }; |
475 | 475 | ||
476 | extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; | 476 | void cgroup_threadgroup_change_begin(struct task_struct *tsk); |
477 | 477 | void cgroup_threadgroup_change_end(struct task_struct *tsk); | |
478 | /** | ||
479 | * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups | ||
480 | * @tsk: target task | ||
481 | * | ||
482 | * Called from threadgroup_change_begin() and allows cgroup operations to | ||
483 | * synchronize against threadgroup changes using a percpu_rw_semaphore. | ||
484 | */ | ||
485 | static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) | ||
486 | { | ||
487 | percpu_down_read(&cgroup_threadgroup_rwsem); | ||
488 | } | ||
489 | |||
490 | /** | ||
491 | * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups | ||
492 | * @tsk: target task | ||
493 | * | ||
494 | * Called from threadgroup_change_end(). Counterpart of | ||
495 | * cgroup_threadcgroup_change_begin(). | ||
496 | */ | ||
497 | static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) | ||
498 | { | ||
499 | percpu_up_read(&cgroup_threadgroup_rwsem); | ||
500 | } | ||
501 | 478 | ||
502 | #else /* CONFIG_CGROUPS */ | 479 | #else /* CONFIG_CGROUPS */ |
503 | 480 | ||
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 31ce435981fe..bdcf358dfce2 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h | |||
@@ -18,15 +18,6 @@ | |||
18 | struct clock_event_device; | 18 | struct clock_event_device; |
19 | struct module; | 19 | struct module; |
20 | 20 | ||
21 | /* Clock event mode commands for legacy ->set_mode(): OBSOLETE */ | ||
22 | enum clock_event_mode { | ||
23 | CLOCK_EVT_MODE_UNUSED, | ||
24 | CLOCK_EVT_MODE_SHUTDOWN, | ||
25 | CLOCK_EVT_MODE_PERIODIC, | ||
26 | CLOCK_EVT_MODE_ONESHOT, | ||
27 | CLOCK_EVT_MODE_RESUME, | ||
28 | }; | ||
29 | |||
30 | /* | 21 | /* |
31 | * Possible states of a clock event device. | 22 | * Possible states of a clock event device. |
32 | * | 23 | * |
@@ -86,16 +77,14 @@ enum clock_event_state { | |||
86 | * @min_delta_ns: minimum delta value in ns | 77 | * @min_delta_ns: minimum delta value in ns |
87 | * @mult: nanosecond to cycles multiplier | 78 | * @mult: nanosecond to cycles multiplier |
88 | * @shift: nanoseconds to cycles divisor (power of two) | 79 | * @shift: nanoseconds to cycles divisor (power of two) |
89 | * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE | ||
90 | * @state_use_accessors:current state of the device, assigned by the core code | 80 | * @state_use_accessors:current state of the device, assigned by the core code |
91 | * @features: features | 81 | * @features: features |
92 | * @retries: number of forced programming retries | 82 | * @retries: number of forced programming retries |
93 | * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME. | 83 | * @set_state_periodic: switch state to periodic |
94 | * @set_state_periodic: switch state to periodic, if !set_mode | 84 | * @set_state_oneshot: switch state to oneshot |
95 | * @set_state_oneshot: switch state to oneshot, if !set_mode | 85 | * @set_state_oneshot_stopped: switch state to oneshot_stopped |
96 | * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode | 86 | * @set_state_shutdown: switch state to shutdown |
97 | * @set_state_shutdown: switch state to shutdown, if !set_mode | 87 | * @tick_resume: resume clkevt device |
98 | * @tick_resume: resume clkevt device, if !set_mode | ||
99 | * @broadcast: function to broadcast events | 88 | * @broadcast: function to broadcast events |
100 | * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration | 89 | * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration |
101 | * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration | 90 | * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration |
@@ -116,18 +105,10 @@ struct clock_event_device { | |||
116 | u64 min_delta_ns; | 105 | u64 min_delta_ns; |
117 | u32 mult; | 106 | u32 mult; |
118 | u32 shift; | 107 | u32 shift; |
119 | enum clock_event_mode mode; | ||
120 | enum clock_event_state state_use_accessors; | 108 | enum clock_event_state state_use_accessors; |
121 | unsigned int features; | 109 | unsigned int features; |
122 | unsigned long retries; | 110 | unsigned long retries; |
123 | 111 | ||
124 | /* | ||
125 | * State transition callback(s): Only one of the two groups should be | ||
126 | * defined: | ||
127 | * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME. | ||
128 | * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume(). | ||
129 | */ | ||
130 | void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); | ||
131 | int (*set_state_periodic)(struct clock_event_device *); | 112 | int (*set_state_periodic)(struct clock_event_device *); |
132 | int (*set_state_oneshot)(struct clock_event_device *); | 113 | int (*set_state_oneshot)(struct clock_event_device *); |
133 | int (*set_state_oneshot_stopped)(struct clock_event_device *); | 114 | int (*set_state_oneshot_stopped)(struct clock_event_device *); |
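With the legacy enum clock_event_mode and ->set_mode() gone, a clockevent driver provides one callback per state instead of a single mode multiplexer. A standalone sketch of that callback style, with simplified stand-in types rather than the kernel's:

/* Sketch only: the core calls exactly one callback per state change. */
#include <stdio.h>

struct clock_event_device {
	int (*set_state_periodic)(struct clock_event_device *);
	int (*set_state_oneshot)(struct clock_event_device *);
	int (*set_state_shutdown)(struct clock_event_device *);
};

static int demo_set_periodic(struct clock_event_device *evt)
{
	printf("program the periodic tick\n");
	return 0;
}

static int demo_set_oneshot(struct clock_event_device *evt)
{
	printf("switch to one-shot mode\n");
	return 0;
}

static int demo_shutdown(struct clock_event_device *evt)
{
	printf("stop the timer\n");
	return 0;
}

static struct clock_event_device demo_clockevent = {
	.set_state_periodic = demo_set_periodic,
	.set_state_oneshot  = demo_set_oneshot,
	.set_state_shutdown = demo_shutdown,
};

int main(void)
{
	demo_clockevent.set_state_periodic(&demo_clockevent);
	demo_clockevent.set_state_shutdown(&demo_clockevent);
	return 0;
}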
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 430efcbea48e..dca22de98d94 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -127,9 +127,14 @@ struct cpufreq_policy { | |||
127 | #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ | 127 | #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ |
128 | 128 | ||
129 | #ifdef CONFIG_CPU_FREQ | 129 | #ifdef CONFIG_CPU_FREQ |
130 | struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); | ||
130 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); | 131 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); |
131 | void cpufreq_cpu_put(struct cpufreq_policy *policy); | 132 | void cpufreq_cpu_put(struct cpufreq_policy *policy); |
132 | #else | 133 | #else |
134 | static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) | ||
135 | { | ||
136 | return NULL; | ||
137 | } | ||
133 | static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) | 138 | static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) |
134 | { | 139 | { |
135 | return NULL; | 140 | return NULL; |
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index ce447f0f1bad..68030e22af35 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h | |||
@@ -65,7 +65,10 @@ struct devfreq_dev_status { | |||
65 | * The "flags" parameter's possible values are | 65 | * The "flags" parameter's possible values are |
66 | * explained above with "DEVFREQ_FLAG_*" macros. | 66 | * explained above with "DEVFREQ_FLAG_*" macros. |
67 | * @get_dev_status: The device should provide the current performance | 67 | * @get_dev_status: The device should provide the current performance |
68 | * status to devfreq, which is used by governors. | 68 | * status to devfreq. Governors are recommended not to |
69 | * use this directly. Instead, governors are recommended | ||
70 | * to use devfreq_update_stats() along with | ||
71 | * devfreq.last_status. | ||
69 | * @get_cur_freq: The device should provide the current frequency | 72 | * @get_cur_freq: The device should provide the current frequency |
70 | * at which it is operating. | 73 | * at which it is operating. |
71 | * @exit: An optional callback that is called when devfreq | 74 | * @exit: An optional callback that is called when devfreq |
@@ -161,6 +164,7 @@ struct devfreq { | |||
161 | struct delayed_work work; | 164 | struct delayed_work work; |
162 | 165 | ||
163 | unsigned long previous_freq; | 166 | unsigned long previous_freq; |
167 | struct devfreq_dev_status last_status; | ||
164 | 168 | ||
165 | void *data; /* private data for governors */ | 169 | void *data; /* private data for governors */ |
166 | 170 | ||
@@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev, | |||
204 | extern void devm_devfreq_unregister_opp_notifier(struct device *dev, | 208 | extern void devm_devfreq_unregister_opp_notifier(struct device *dev, |
205 | struct devfreq *devfreq); | 209 | struct devfreq *devfreq); |
206 | 210 | ||
211 | /** | ||
212 | * devfreq_update_stats() - update the last_status pointer in struct devfreq | ||
213 | * @df: the devfreq instance whose status needs updating | ||
214 | * | ||
215 | * Governors are recommended to use this function along with last_status, | ||
216 | * which allows other entities to reuse the last_status without affecting | ||
217 | * the values fetched later by governors. | ||
218 | */ | ||
219 | static inline int devfreq_update_stats(struct devfreq *df) | ||
220 | { | ||
221 | return df->profile->get_dev_status(df->dev.parent, &df->last_status); | ||
222 | } | ||
223 | |||
207 | #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) | 224 | #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) |
208 | /** | 225 | /** |
209 | * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq | 226 | * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq |
@@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, | |||
289 | struct devfreq *devfreq) | 306 | struct devfreq *devfreq) |
290 | { | 307 | { |
291 | } | 308 | } |
309 | |||
310 | static inline int devfreq_update_stats(struct devfreq *df) | ||
311 | { | ||
312 | return -EINVAL; | ||
313 | } | ||
292 | #endif /* CONFIG_PM_DEVFREQ */ | 314 | #endif /* CONFIG_PM_DEVFREQ */ |
293 | 315 | ||
294 | #endif /* __LINUX_DEVFREQ_H__ */ | 316 | #endif /* __LINUX_DEVFREQ_H__ */ |
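The devfreq kernel-doc above tells governors to refresh df->last_status through devfreq_update_stats() rather than calling ->get_dev_status() directly. A standalone sketch of that pattern, with simplified stand-in structures; the proportional policy at the end is purely illustrative:

/* Sketch only: stand-in types model the devfreq structures. */
#include <stdio.h>

struct devfreq_dev_status {
	unsigned long total_time, busy_time, current_frequency;
};

struct devfreq {
	struct devfreq_dev_status last_status;
	unsigned long previous_freq;
	int (*get_dev_status)(struct devfreq_dev_status *st);
};

static int devfreq_update_stats(struct devfreq *df)
{
	return df->get_dev_status(&df->last_status);
}

static int fake_get_dev_status(struct devfreq_dev_status *st)
{
	st->total_time = 100;
	st->busy_time = 40;		/* pretend 40 % load */
	st->current_frequency = 800000;
	return 0;
}

static unsigned long pick_target_freq(struct devfreq *df)
{
	/* refresh last_status once, then read everything from it */
	if (devfreq_update_stats(df) || !df->last_status.total_time)
		return df->previous_freq;

	return df->last_status.current_frequency *
	       df->last_status.busy_time / df->last_status.total_time;
}

int main(void)
{
	struct devfreq df = {
		.previous_freq = 800000,
		.get_dev_status = fake_get_dev_status,
	};

	printf("target frequency: %lu\n", pick_target_freq(&df));
	return 0;
}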
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d0b380ee7d67..e38681f4912d 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -25,6 +25,13 @@ | |||
25 | extern struct files_struct init_files; | 25 | extern struct files_struct init_files; |
26 | extern struct fs_struct init_fs; | 26 | extern struct fs_struct init_fs; |
27 | 27 | ||
28 | #ifdef CONFIG_CGROUPS | ||
29 | #define INIT_GROUP_RWSEM(sig) \ | ||
30 | .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem), | ||
31 | #else | ||
32 | #define INIT_GROUP_RWSEM(sig) | ||
33 | #endif | ||
34 | |||
28 | #ifdef CONFIG_CPUSETS | 35 | #ifdef CONFIG_CPUSETS |
29 | #define INIT_CPUSET_SEQ(tsk) \ | 36 | #define INIT_CPUSET_SEQ(tsk) \ |
30 | .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), | 37 | .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), |
@@ -57,6 +64,7 @@ extern struct fs_struct init_fs; | |||
57 | INIT_PREV_CPUTIME(sig) \ | 64 | INIT_PREV_CPUTIME(sig) \ |
58 | .cred_guard_mutex = \ | 65 | .cred_guard_mutex = \ |
59 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ | 66 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ |
67 | INIT_GROUP_RWSEM(sig) \ | ||
60 | } | 68 | } |
61 | 69 | ||
62 | extern struct nsproxy init_nsproxy; | 70 | extern struct nsproxy init_nsproxy; |
diff --git a/include/linux/iova.h b/include/linux/iova.h index 3920a19d8194..92f7177db2ce 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h | |||
@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) | |||
68 | return iova >> iova_shift(iovad); | 68 | return iova >> iova_shift(iovad); |
69 | } | 69 | } |
70 | 70 | ||
71 | int iommu_iova_cache_init(void); | 71 | int iova_cache_get(void); |
72 | void iommu_iova_cache_destroy(void); | 72 | void iova_cache_put(void); |
73 | 73 | ||
74 | struct iova *alloc_iova_mem(void); | 74 | struct iova *alloc_iova_mem(void); |
75 | void free_iova_mem(struct iova *iova); | 75 | void free_iova_mem(struct iova *iova); |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 6f8b34066442..11bf09288ddb 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -110,8 +110,8 @@ enum { | |||
110 | /* | 110 | /* |
111 | * Return value for chip->irq_set_affinity() | 111 | * Return value for chip->irq_set_affinity() |
112 | * | 112 | * |
113 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity | 113 | * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity |
114 | * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity | 114 | * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity |
115 | * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to | 115 | * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to |
116 | * support stacked irqchips, which indicates skipping | 116 | * support stacked irqchips, which indicates skipping |
117 | * all descendent irqchips. | 117 | * all descendent irqchips. |
@@ -129,9 +129,19 @@ struct irq_domain; | |||
129 | * struct irq_common_data - per irq data shared by all irqchips | 129 | * struct irq_common_data - per irq data shared by all irqchips |
130 | * @state_use_accessors: status information for irq chip functions. | 130 | * @state_use_accessors: status information for irq chip functions. |
131 | * Use accessor functions to deal with it | 131 | * Use accessor functions to deal with it |
132 | * @node: node index useful for balancing | ||
133 | * @handler_data: per-IRQ data for the irq_chip methods | ||
134 | * @affinity: IRQ affinity on SMP | ||
135 | * @msi_desc: MSI descriptor | ||
132 | */ | 136 | */ |
133 | struct irq_common_data { | 137 | struct irq_common_data { |
134 | unsigned int state_use_accessors; | 138 | unsigned int state_use_accessors; |
139 | #ifdef CONFIG_NUMA | ||
140 | unsigned int node; | ||
141 | #endif | ||
142 | void *handler_data; | ||
143 | struct msi_desc *msi_desc; | ||
144 | cpumask_var_t affinity; | ||
135 | }; | 145 | }; |
136 | 146 | ||
137 | /** | 147 | /** |
@@ -139,38 +149,26 @@ struct irq_common_data { | |||
139 | * @mask: precomputed bitmask for accessing the chip registers | 149 | * @mask: precomputed bitmask for accessing the chip registers |
140 | * @irq: interrupt number | 150 | * @irq: interrupt number |
141 | * @hwirq: hardware interrupt number, local to the interrupt domain | 151 | * @hwirq: hardware interrupt number, local to the interrupt domain |
142 | * @node: node index useful for balancing | ||
143 | * @common: point to data shared by all irqchips | 152 | * @common: point to data shared by all irqchips |
144 | * @chip: low level interrupt hardware access | 153 | * @chip: low level interrupt hardware access |
145 | * @domain: Interrupt translation domain; responsible for mapping | 154 | * @domain: Interrupt translation domain; responsible for mapping |
146 | * between hwirq number and linux irq number. | 155 | * between hwirq number and linux irq number. |
147 | * @parent_data: pointer to parent struct irq_data to support hierarchy | 156 | * @parent_data: pointer to parent struct irq_data to support hierarchy |
148 | * irq_domain | 157 | * irq_domain |
149 | * @handler_data: per-IRQ data for the irq_chip methods | ||
150 | * @chip_data: platform-specific per-chip private data for the chip | 158 | * @chip_data: platform-specific per-chip private data for the chip |
151 | * methods, to allow shared chip implementations | 159 | * methods, to allow shared chip implementations |
152 | * @msi_desc: MSI descriptor | ||
153 | * @affinity: IRQ affinity on SMP | ||
154 | * | ||
155 | * The fields here need to overlay the ones in irq_desc until we | ||
156 | * cleaned up the direct references and switched everything over to | ||
157 | * irq_data. | ||
158 | */ | 160 | */ |
159 | struct irq_data { | 161 | struct irq_data { |
160 | u32 mask; | 162 | u32 mask; |
161 | unsigned int irq; | 163 | unsigned int irq; |
162 | unsigned long hwirq; | 164 | unsigned long hwirq; |
163 | unsigned int node; | ||
164 | struct irq_common_data *common; | 165 | struct irq_common_data *common; |
165 | struct irq_chip *chip; | 166 | struct irq_chip *chip; |
166 | struct irq_domain *domain; | 167 | struct irq_domain *domain; |
167 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | 168 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
168 | struct irq_data *parent_data; | 169 | struct irq_data *parent_data; |
169 | #endif | 170 | #endif |
170 | void *handler_data; | ||
171 | void *chip_data; | 171 | void *chip_data; |
172 | struct msi_desc *msi_desc; | ||
173 | cpumask_var_t affinity; | ||
174 | }; | 172 | }; |
175 | 173 | ||
176 | /* | 174 | /* |
@@ -190,6 +188,7 @@ struct irq_data { | |||
190 | * IRQD_IRQ_MASKED - Masked state of the interrupt | 188 | * IRQD_IRQ_MASKED - Masked state of the interrupt |
191 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt | 189 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt |
192 | * IRQD_WAKEUP_ARMED - Wakeup mode armed | 190 | * IRQD_WAKEUP_ARMED - Wakeup mode armed |
191 | * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU | ||
193 | */ | 192 | */ |
194 | enum { | 193 | enum { |
195 | IRQD_TRIGGER_MASK = 0xf, | 194 | IRQD_TRIGGER_MASK = 0xf, |
@@ -204,6 +203,7 @@ enum { | |||
204 | IRQD_IRQ_MASKED = (1 << 17), | 203 | IRQD_IRQ_MASKED = (1 << 17), |
205 | IRQD_IRQ_INPROGRESS = (1 << 18), | 204 | IRQD_IRQ_INPROGRESS = (1 << 18), |
206 | IRQD_WAKEUP_ARMED = (1 << 19), | 205 | IRQD_WAKEUP_ARMED = (1 << 19), |
206 | IRQD_FORWARDED_TO_VCPU = (1 << 20), | ||
207 | }; | 207 | }; |
208 | 208 | ||
209 | #define __irqd_to_state(d) ((d)->common->state_use_accessors) | 209 | #define __irqd_to_state(d) ((d)->common->state_use_accessors) |
@@ -282,6 +282,20 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d) | |||
282 | return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; | 282 | return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; |
283 | } | 283 | } |
284 | 284 | ||
285 | static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d) | ||
286 | { | ||
287 | return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU; | ||
288 | } | ||
289 | |||
290 | static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d) | ||
291 | { | ||
292 | __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU; | ||
293 | } | ||
294 | |||
295 | static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) | ||
296 | { | ||
297 | __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; | ||
298 | } | ||
285 | 299 | ||
286 | /* | 300 | /* |
287 | * Functions for chained handlers which can be enabled/disabled by the | 301 | * Functions for chained handlers which can be enabled/disabled by the |
@@ -461,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq) | |||
461 | * Built-in IRQ handlers for various IRQ types, | 475 | * Built-in IRQ handlers for various IRQ types, |
462 | * callable via desc->handle_irq() | 476 | * callable via desc->handle_irq() |
463 | */ | 477 | */ |
464 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); | 478 | extern void handle_level_irq(struct irq_desc *desc); |
465 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); | 479 | extern void handle_fasteoi_irq(struct irq_desc *desc); |
466 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); | 480 | extern void handle_edge_irq(struct irq_desc *desc); |
467 | extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); | 481 | extern void handle_edge_eoi_irq(struct irq_desc *desc); |
468 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); | 482 | extern void handle_simple_irq(struct irq_desc *desc); |
469 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); | 483 | extern void handle_percpu_irq(struct irq_desc *desc); |
470 | extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); | 484 | extern void handle_percpu_devid_irq(struct irq_desc *desc); |
471 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | 485 | extern void handle_bad_irq(struct irq_desc *desc); |
472 | extern void handle_nested_irq(unsigned int irq); | 486 | extern void handle_nested_irq(unsigned int irq); |
473 | 487 | ||
474 | extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); | 488 | extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); |
@@ -627,23 +641,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d) | |||
627 | static inline void *irq_get_handler_data(unsigned int irq) | 641 | static inline void *irq_get_handler_data(unsigned int irq) |
628 | { | 642 | { |
629 | struct irq_data *d = irq_get_irq_data(irq); | 643 | struct irq_data *d = irq_get_irq_data(irq); |
630 | return d ? d->handler_data : NULL; | 644 | return d ? d->common->handler_data : NULL; |
631 | } | 645 | } |
632 | 646 | ||
633 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) | 647 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) |
634 | { | 648 | { |
635 | return d->handler_data; | 649 | return d->common->handler_data; |
636 | } | 650 | } |
637 | 651 | ||
638 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) | 652 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) |
639 | { | 653 | { |
640 | struct irq_data *d = irq_get_irq_data(irq); | 654 | struct irq_data *d = irq_get_irq_data(irq); |
641 | return d ? d->msi_desc : NULL; | 655 | return d ? d->common->msi_desc : NULL; |
642 | } | 656 | } |
643 | 657 | ||
644 | static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) | 658 | static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) |
645 | { | 659 | { |
646 | return d->msi_desc; | 660 | return d->common->msi_desc; |
647 | } | 661 | } |
648 | 662 | ||
649 | static inline u32 irq_get_trigger_type(unsigned int irq) | 663 | static inline u32 irq_get_trigger_type(unsigned int irq) |
@@ -652,21 +666,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq) | |||
652 | return d ? irqd_get_trigger_type(d) : 0; | 666 | return d ? irqd_get_trigger_type(d) : 0; |
653 | } | 667 | } |
654 | 668 | ||
655 | static inline int irq_data_get_node(struct irq_data *d) | 669 | static inline int irq_common_data_get_node(struct irq_common_data *d) |
656 | { | 670 | { |
671 | #ifdef CONFIG_NUMA | ||
657 | return d->node; | 672 | return d->node; |
673 | #else | ||
674 | return 0; | ||
675 | #endif | ||
676 | } | ||
677 | |||
678 | static inline int irq_data_get_node(struct irq_data *d) | ||
679 | { | ||
680 | return irq_common_data_get_node(d->common); | ||
658 | } | 681 | } |
659 | 682 | ||
660 | static inline struct cpumask *irq_get_affinity_mask(int irq) | 683 | static inline struct cpumask *irq_get_affinity_mask(int irq) |
661 | { | 684 | { |
662 | struct irq_data *d = irq_get_irq_data(irq); | 685 | struct irq_data *d = irq_get_irq_data(irq); |
663 | 686 | ||
664 | return d ? d->affinity : NULL; | 687 | return d ? d->common->affinity : NULL; |
665 | } | 688 | } |
666 | 689 | ||
667 | static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) | 690 | static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) |
668 | { | 691 | { |
669 | return d->affinity; | 692 | return d->common->affinity; |
670 | } | 693 | } |
671 | 694 | ||
672 | unsigned int arch_dynirq_lower_bound(unsigned int from); | 695 | unsigned int arch_dynirq_lower_bound(unsigned int from); |
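The irq.h reshuffle moves state that is logically per interrupt (node, handler_data, msi_desc, affinity) out of struct irq_data and into the shared struct irq_common_data, so with hierarchical irq domains the several irq_data instances of one interrupt all reference a single copy. A standalone sketch of that sharing, with simplified stand-in types:

/* Sketch only: two levels of a hierarchy point at one common structure. */
#include <stdio.h>

struct irq_common_data {
	void *handler_data;
};

struct irq_data {
	struct irq_common_data *common;
	struct irq_data *parent_data;	/* next level in the hierarchy */
};

static void *irq_data_get_irq_handler_data(struct irq_data *d)
{
	return d->common->handler_data;
}

int main(void)
{
	struct irq_common_data common = { 0 };
	struct irq_data parent = { .common = &common };
	struct irq_data child  = { .common = &common, .parent_data = &parent };
	int cookie = 42;

	/* store it once ... */
	common.handler_data = &cookie;

	/* ... and every level of the hierarchy sees the same value */
	printf("child:  %d\n", *(int *)irq_data_get_irq_handler_data(&child));
	printf("parent: %d\n", *(int *)irq_data_get_irq_handler_data(&parent));
	return 0;
}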
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 5acfa26602e1..a587a33363c7 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
@@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS]; | |||
98 | 98 | ||
99 | static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) | 99 | static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) |
100 | { | 100 | { |
101 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | 101 | return container_of(data->common, struct irq_desc, irq_common_data); |
102 | return irq_to_desc(data->irq); | ||
103 | #else | ||
104 | return container_of(data, struct irq_desc, irq_data); | ||
105 | #endif | ||
106 | } | 102 | } |
107 | 103 | ||
108 | static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) | 104 | static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) |
@@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc) | |||
127 | 123 | ||
128 | static inline void *irq_desc_get_handler_data(struct irq_desc *desc) | 124 | static inline void *irq_desc_get_handler_data(struct irq_desc *desc) |
129 | { | 125 | { |
130 | return desc->irq_data.handler_data; | 126 | return desc->irq_common_data.handler_data; |
131 | } | 127 | } |
132 | 128 | ||
133 | static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) | 129 | static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) |
134 | { | 130 | { |
135 | return desc->irq_data.msi_desc; | 131 | return desc->irq_common_data.msi_desc; |
136 | } | 132 | } |
137 | 133 | ||
138 | /* | 134 | /* |
139 | * Architectures call this to let the generic IRQ layer | 135 | * Architectures call this to let the generic IRQ layer |
140 | * handle an interrupt. If the descriptor is attached to an | 136 | * handle an interrupt. |
141 | * irqchip-style controller then we call the ->handle_irq() handler, | ||
142 | * and it calls __do_IRQ() if it's attached to an irqtype-style controller. | ||
143 | */ | 137 | */ |
144 | static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) | 138 | static inline void generic_handle_irq_desc(struct irq_desc *desc) |
145 | { | 139 | { |
146 | desc->handle_irq(irq, desc); | 140 | desc->handle_irq(desc); |
147 | } | 141 | } |
148 | 142 | ||
149 | int generic_handle_irq(unsigned int irq); | 143 | int generic_handle_irq(unsigned int irq); |
@@ -176,29 +170,6 @@ static inline int irq_has_action(unsigned int irq) | |||
176 | return irq_desc_has_action(irq_to_desc(irq)); | 170 | return irq_desc_has_action(irq_to_desc(irq)); |
177 | } | 171 | } |
178 | 172 | ||
179 | /* caller has locked the irq_desc and both params are valid */ | ||
180 | static inline void __irq_set_handler_locked(unsigned int irq, | ||
181 | irq_flow_handler_t handler) | ||
182 | { | ||
183 | struct irq_desc *desc; | ||
184 | |||
185 | desc = irq_to_desc(irq); | ||
186 | desc->handle_irq = handler; | ||
187 | } | ||
188 | |||
189 | /* caller has locked the irq_desc and both params are valid */ | ||
190 | static inline void | ||
191 | __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, | ||
192 | irq_flow_handler_t handler, const char *name) | ||
193 | { | ||
194 | struct irq_desc *desc; | ||
195 | |||
196 | desc = irq_to_desc(irq); | ||
197 | irq_desc_get_irq_data(desc)->chip = chip; | ||
198 | desc->handle_irq = handler; | ||
199 | desc->name = name; | ||
200 | } | ||
201 | |||
202 | /** | 173 | /** |
203 | * irq_set_handler_locked - Set irq handler from a locked region | 174 | * irq_set_handler_locked - Set irq handler from a locked region |
204 | * @data: Pointer to the irq_data structure which identifies the irq | 175 | * @data: Pointer to the irq_data structure which identifies the irq |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d3ca79236fb0..f644fdb06dd6 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
@@ -161,6 +161,11 @@ enum { | |||
161 | IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), | 161 | IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), |
162 | }; | 162 | }; |
163 | 163 | ||
164 | static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) | ||
165 | { | ||
166 | return d->of_node; | ||
167 | } | ||
168 | |||
164 | #ifdef CONFIG_IRQ_DOMAIN | 169 | #ifdef CONFIG_IRQ_DOMAIN |
165 | struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, | 170 | struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, |
166 | irq_hw_number_t hwirq_max, int direct_max, | 171 | irq_hw_number_t hwirq_max, int direct_max, |
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h index 62d543004197..661bed0ed1f3 100644 --- a/include/linux/irqhandler.h +++ b/include/linux/irqhandler.h | |||
@@ -8,7 +8,7 @@ | |||
8 | 8 | ||
9 | struct irq_desc; | 9 | struct irq_desc; |
10 | struct irq_data; | 10 | struct irq_data; |
11 | typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); | 11 | typedef void (*irq_flow_handler_t)(struct irq_desc *desc); |
12 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); | 12 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); |
13 | 13 | ||
14 | #endif | 14 | #endif |
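irq_flow_handler_t now takes only the descriptor; where the irq number is still needed it is read back from it. A sketch of a chained demultiplex handler written against the new signature; this is a hypothetical, kernel-style fragment (not standalone):

#include <linux/irqchip/chained_irq.h>

static void example_demux_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int irq = irq_desc_get_irq(desc);	/* replaces the old parameter */

	chained_irq_enter(chip, desc);
	/* ... read the hardware status and generic_handle_irq() the children ... */
	chained_irq_exit(chip, desc);
}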
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 7f653e8f6690..f1094238ab2a 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
@@ -21,8 +21,8 @@ | |||
21 | * | 21 | * |
22 | * DEFINE_STATIC_KEY_TRUE(key); | 22 | * DEFINE_STATIC_KEY_TRUE(key); |
23 | * DEFINE_STATIC_KEY_FALSE(key); | 23 | * DEFINE_STATIC_KEY_FALSE(key); |
24 | * static_key_likely() | 24 | * static_branch_likely() |
25 | * statick_key_unlikely() | 25 | * static_branch_unlikely() |
26 | * | 26 | * |
27 | * Jump labels provide an interface to generate dynamic branches using | 27 | * Jump labels provide an interface to generate dynamic branches using |
28 | * self-modifying code. Assuming toolchain and architecture support, if we | 28 | * self-modifying code. Assuming toolchain and architecture support, if we |
@@ -45,12 +45,10 @@ | |||
45 | * statement, setting the key to true requires us to patch in a jump | 45 | * statement, setting the key to true requires us to patch in a jump |
46 | * to the out-of-line of true branch. | 46 | * to the out-of-line of true branch. |
47 | * | 47 | * |
48 | * In addtion to static_branch_{enable,disable}, we can also reference count | 48 | * In addition to static_branch_{enable,disable}, we can also reference count |
49 | * the key or branch direction via static_branch_{inc,dec}. Thus, | 49 | * the key or branch direction via static_branch_{inc,dec}. Thus, |
50 | * static_branch_inc() can be thought of as a 'make more true' and | 50 | * static_branch_inc() can be thought of as a 'make more true' and |
51 | * static_branch_dec() as a 'make more false'. The inc()/dec() | 51 | * static_branch_dec() as a 'make more false'. |
52 | * interface is meant to be used exclusively from the inc()/dec() for a given | ||
53 | * key. | ||
54 | * | 52 | * |
55 | * Since this relies on modifying code, the branch modifying functions | 53 | * Since this relies on modifying code, the branch modifying functions |
56 | * must be considered absolute slow paths (machine wide synchronization etc.). | 54 | * must be considered absolute slow paths (machine wide synchronization etc.). |
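The corrected comment refers to static_branch_likely()/static_branch_unlikely(); a short hedged sketch of the documented pattern, with illustrative names not taken from this patch:

/* Default-false key guarding a rarely enabled check on a hot path. */
DEFINE_STATIC_KEY_FALSE(example_feature_key);

static void example_hot_path(void)
{
        if (static_branch_unlikely(&example_feature_key))
                pr_info("feature enabled\n");           /* kept out of line until enabled */
}

static void example_enable_feature(void)
{
        static_branch_enable(&example_feature_key);     /* slow path: patches the branch */
}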
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ad800e62cb7a..6452ff4c463f 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -242,7 +242,6 @@ struct mem_cgroup { | |||
242 | * percpu counter. | 242 | * percpu counter. |
243 | */ | 243 | */ |
244 | struct mem_cgroup_stat_cpu __percpu *stat; | 244 | struct mem_cgroup_stat_cpu __percpu *stat; |
245 | spinlock_t pcp_counter_lock; | ||
246 | 245 | ||
247 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) | 246 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) |
248 | struct cg_proto tcp_mem; | 247 | struct cg_proto tcp_mem; |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8eb3b19af2a4..250b1ff8b48d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out { | |||
402 | u8 rsvd[8]; | 402 | u8 rsvd[8]; |
403 | }; | 403 | }; |
404 | 404 | ||
405 | struct mlx5_cmd_query_special_contexts_mbox_in { | ||
406 | struct mlx5_inbox_hdr hdr; | ||
407 | u8 rsvd[8]; | ||
408 | }; | ||
409 | |||
410 | struct mlx5_cmd_query_special_contexts_mbox_out { | ||
411 | struct mlx5_outbox_hdr hdr; | ||
412 | __be32 dump_fill_mkey; | ||
413 | __be32 resd_lkey; | ||
414 | }; | ||
415 | |||
416 | struct mlx5_cmd_layout { | 405 | struct mlx5_cmd_layout { |
417 | u8 type; | 406 | u8 type; |
418 | u8 rsvd0[3]; | 407 | u8 rsvd0[3]; |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27b53f9a24ad..8b6d6f2154a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); | |||
845 | int mlx5_register_interface(struct mlx5_interface *intf); | 845 | int mlx5_register_interface(struct mlx5_interface *intf); |
846 | void mlx5_unregister_interface(struct mlx5_interface *intf); | 846 | void mlx5_unregister_interface(struct mlx5_interface *intf); |
847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); | 847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); |
848 | int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); | ||
849 | 848 | ||
850 | struct mlx5_profile { | 849 | struct mlx5_profile { |
851 | u64 mask; | 850 | u64 mask; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 91c08f6f0dc9..80001de019ba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone, | |||
905 | #endif | 905 | #endif |
906 | } | 906 | } |
907 | 907 | ||
908 | #ifdef CONFIG_MEMCG | ||
909 | static inline struct mem_cgroup *page_memcg(struct page *page) | ||
910 | { | ||
911 | return page->mem_cgroup; | ||
912 | } | ||
913 | |||
914 | static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) | ||
915 | { | ||
916 | page->mem_cgroup = memcg; | ||
917 | } | ||
918 | #else | ||
919 | static inline struct mem_cgroup *page_memcg(struct page *page) | ||
920 | { | ||
921 | return NULL; | ||
922 | } | ||
923 | |||
924 | static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) | ||
925 | { | ||
926 | } | ||
927 | #endif | ||
928 | |||
908 | /* | 929 | /* |
909 | * Some inline functions in vmstat.h depend on page_zone() | 930 | * Some inline functions in vmstat.h depend on page_zone() |
910 | */ | 931 | */ |
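The new page_memcg()/set_page_memcg() helpers compile to no-ops without CONFIG_MEMCG, so callers need no ifdefs. A hypothetical caller sketch (not from this patch):

/* Transfer memcg ownership from an old page to its replacement. */
static void example_transfer_memcg(struct page *oldpage, struct page *newpage)
{
        set_page_memcg(newpage, page_memcg(oldpage));
        set_page_memcg(oldpage, NULL);
}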
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 88a00694eda5..2d15e3831440 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n) | |||
507 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | 507 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); |
508 | smp_mb__before_atomic(); | 508 | smp_mb__before_atomic(); |
509 | clear_bit(NAPI_STATE_SCHED, &n->state); | 509 | clear_bit(NAPI_STATE_SCHED, &n->state); |
510 | clear_bit(NAPI_STATE_NPSVC, &n->state); | ||
510 | } | 511 | } |
511 | 512 | ||
512 | #ifdef CONFIG_SMP | 513 | #ifdef CONFIG_SMP |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 962387a192f1..4a4e3a092337 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/spinlock.h> | 19 | #include <linux/spinlock.h> |
20 | #include <linux/ethtool.h> | 20 | #include <linux/ethtool.h> |
21 | #include <linux/mii.h> | 21 | #include <linux/mii.h> |
22 | #include <linux/module.h> | ||
22 | #include <linux/timer.h> | 23 | #include <linux/timer.h> |
23 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
24 | #include <linux/mod_devicetable.h> | 25 | #include <linux/mod_devicetable.h> |
@@ -153,6 +154,7 @@ struct sk_buff; | |||
153 | * PHYs should register using this structure | 154 | * PHYs should register using this structure |
154 | */ | 155 | */ |
155 | struct mii_bus { | 156 | struct mii_bus { |
157 | struct module *owner; | ||
156 | const char *name; | 158 | const char *name; |
157 | char id[MII_BUS_ID_SIZE]; | 159 | char id[MII_BUS_ID_SIZE]; |
158 | void *priv; | 160 | void *priv; |
@@ -198,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void) | |||
198 | return mdiobus_alloc_size(0); | 200 | return mdiobus_alloc_size(0); |
199 | } | 201 | } |
200 | 202 | ||
201 | int mdiobus_register(struct mii_bus *bus); | 203 | int __mdiobus_register(struct mii_bus *bus, struct module *owner); |
204 | #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) | ||
202 | void mdiobus_unregister(struct mii_bus *bus); | 205 | void mdiobus_unregister(struct mii_bus *bus); |
203 | void mdiobus_free(struct mii_bus *bus); | 206 | void mdiobus_free(struct mii_bus *bus); |
204 | struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); | 207 | struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); |
@@ -742,6 +745,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, | |||
742 | struct phy_c45_device_ids *c45_ids); | 745 | struct phy_c45_device_ids *c45_ids); |
743 | struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); | 746 | struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); |
744 | int phy_device_register(struct phy_device *phy); | 747 | int phy_device_register(struct phy_device *phy); |
748 | void phy_device_remove(struct phy_device *phydev); | ||
745 | int phy_init_hw(struct phy_device *phydev); | 749 | int phy_init_hw(struct phy_device *phydev); |
746 | int phy_suspend(struct phy_device *phydev); | 750 | int phy_suspend(struct phy_device *phydev); |
747 | int phy_resume(struct phy_device *phydev); | 751 | int phy_resume(struct phy_device *phydev); |
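mdiobus_register() is now a macro expanding to __mdiobus_register(bus, THIS_MODULE), so the registering module is recorded in the new bus->owner field without changing callers. A hedged probe sketch (driver name and fields are illustrative, not from this patch):

static int example_mdio_probe(struct platform_device *pdev)
{
        struct mii_bus *bus;

        bus = devm_mdiobus_alloc_size(&pdev->dev, 0);
        if (!bus)
                return -ENOMEM;

        bus->name = "example-mdio";
        snprintf(bus->id, MII_BUS_ID_SIZE, "example-%s", dev_name(&pdev->dev));
        bus->parent = &pdev->dev;
        /* a real driver would also set bus->read and bus->write here */

        return mdiobus_register(bus);   /* expands to __mdiobus_register(bus, THIS_MODULE) */
}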
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff476515f716..581abf848566 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, | |||
230 | struct rcu_synchronize *rs_array); | 230 | struct rcu_synchronize *rs_array); |
231 | 231 | ||
232 | #define _wait_rcu_gp(checktiny, ...) \ | 232 | #define _wait_rcu_gp(checktiny, ...) \ |
233 | do { \ | 233 | do { \ |
234 | call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ | 234 | call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ |
235 | const int __n = ARRAY_SIZE(__crcu_array); \ | 235 | struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ |
236 | struct rcu_synchronize __rs_array[__n]; \ | 236 | __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ |
237 | \ | 237 | __crcu_array, __rs_array); \ |
238 | __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ | ||
239 | } while (0) | 238 | } while (0) |
240 | 239 | ||
241 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) | 240 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) |
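The macro now sizes both on-stack arrays directly with ARRAY_SIZE() rather than via a const int, which had made __rs_array a variable-length array. A rough expansion sketch for the common single-flavour case (illustrative only):

/* wait_rcu_gp(call_rcu) expands approximately to:
 *
 *      call_rcu_func_t __crcu_array[] = { call_rcu };
 *      struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)];
 *      __wait_rcu_gp(false, ARRAY_SIZE(__crcu_array), __crcu_array, __rs_array);
 *
 * i.e. one rcu_synchronize completion per RCU flavour passed in.
 */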
diff --git a/include/linux/sched.h b/include/linux/sched.h index a4ab9daa387c..b7b9501b41af 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -762,6 +762,18 @@ struct signal_struct { | |||
762 | unsigned audit_tty_log_passwd; | 762 | unsigned audit_tty_log_passwd; |
763 | struct tty_audit_buf *tty_audit_buf; | 763 | struct tty_audit_buf *tty_audit_buf; |
764 | #endif | 764 | #endif |
765 | #ifdef CONFIG_CGROUPS | ||
766 | /* | ||
767 | * group_rwsem prevents new tasks from entering the threadgroup and | ||
768 | * member tasks from exiting, or more specifically, setting of | ||
769 | * PF_EXITING. fork and exit paths are protected with this rwsem | ||
770 | * using threadgroup_change_begin/end(). Users which require | ||
771 | * threadgroup to remain stable should use threadgroup_[un]lock() | ||
772 | * which also takes care of exec path. Currently, cgroup is the | ||
773 | * only user. | ||
774 | */ | ||
775 | struct rw_semaphore group_rwsem; | ||
776 | #endif | ||
765 | 777 | ||
766 | oom_flags_t oom_flags; | 778 | oom_flags_t oom_flags; |
767 | short oom_score_adj; /* OOM kill score adjustment */ | 779 | short oom_score_adj; /* OOM kill score adjustment */ |
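The restored comment describes the fork/exit convention; a paraphrased sketch of the fork()-side call sites it refers to (not part of this hunk):

        /* in the fork path: the child is linked into the group only while
         * the per-signal rwsem is read-held, so a cgroup writer sees either
         * all of the threadgroup or none of it. */
        threadgroup_change_begin(current);
        /* ... link the new task into current's thread group ... */
        threadgroup_change_end(current);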
diff --git a/include/linux/security.h b/include/linux/security.h index 79d85ddf8093..2f4c1f7aa7db 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2, | |||
946 | unsigned long arg4, | 946 | unsigned long arg4, |
947 | unsigned long arg5) | 947 | unsigned long arg5) |
948 | { | 948 | { |
949 | return cap_task_prctl(option, arg2, arg3, arg3, arg5); | 949 | return cap_task_prctl(option, arg2, arg3, arg4, arg5); |
950 | } | 950 | } |
951 | 951 | ||
952 | static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) | 952 | static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2738d355cdf9..4398411236f1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -179,6 +179,9 @@ struct nf_bridge_info { | |||
179 | u8 bridged_dnat:1; | 179 | u8 bridged_dnat:1; |
180 | __u16 frag_max_size; | 180 | __u16 frag_max_size; |
181 | struct net_device *physindev; | 181 | struct net_device *physindev; |
182 | |||
183 | /* always valid & non-NULL from FORWARD on, for physdev match */ | ||
184 | struct net_device *physoutdev; | ||
182 | union { | 185 | union { |
183 | /* prerouting: detect dnat in orig/reply direction */ | 186 | /* prerouting: detect dnat in orig/reply direction */ |
184 | __be32 ipv4_daddr; | 187 | __be32 ipv4_daddr; |
@@ -189,9 +192,6 @@ struct nf_bridge_info { | |||
189 | * skb is out in neigh layer. | 192 | * skb is out in neigh layer. |
190 | */ | 193 | */ |
191 | char neigh_header[8]; | 194 | char neigh_header[8]; |
192 | |||
193 | /* always valid & non-NULL from FORWARD on, for physdev match */ | ||
194 | struct net_device *physoutdev; | ||
195 | }; | 195 | }; |
196 | }; | 196 | }; |
197 | #endif | 197 | #endif |
@@ -2707,6 +2707,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, | |||
2707 | { | 2707 | { |
2708 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 2708 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
2709 | skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); | 2709 | skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); |
2710 | else if (skb->ip_summed == CHECKSUM_PARTIAL && | ||
2711 | skb_checksum_start_offset(skb) < 0) | ||
2712 | skb->ip_summed = CHECKSUM_NONE; | ||
2710 | } | 2713 | } |
2711 | 2714 | ||
2712 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); | 2715 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); |
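The new CHECKSUM_PARTIAL branch only matters when the checksum start has already been pulled past; the calling convention is unchanged and mirrors skb_pull_rcsum(), declared just below. A sketch of a typical caller (hypothetical helper, not from this patch):

static void example_strip_header(struct sk_buff *skb, unsigned int hdr_len)
{
        unsigned char *old_data = skb->data;    /* points at the bytes being removed */

        __skb_pull(skb, hdr_len);
        skb_postpull_rcsum(skb, old_data, hdr_len);
}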
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 269e8afd3e2a..6b00f18f5e6b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -34,7 +34,7 @@ extern struct bus_type spi_bus_type; | |||
34 | 34 | ||
35 | /** | 35 | /** |
36 | * struct spi_statistics - statistics for spi transfers | 36 | * struct spi_statistics - statistics for spi transfers |
37 | * @clock: lock protecting this structure | 37 | * @lock: lock protecting this structure |
38 | * | 38 | * |
39 | * @messages: number of spi-messages handled | 39 | * @messages: number of spi-messages handled |
40 | * @transfers: number of spi_transfers handled | 40 | * @transfers: number of spi_transfers handled |
diff --git a/include/linux/string.h b/include/linux/string.h index a8d90db9c4b0..9ef7795e65e4 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); | |||
25 | #ifndef __HAVE_ARCH_STRLCPY | 25 | #ifndef __HAVE_ARCH_STRLCPY |
26 | size_t strlcpy(char *, const char *, size_t); | 26 | size_t strlcpy(char *, const char *, size_t); |
27 | #endif | 27 | #endif |
28 | #ifndef __HAVE_ARCH_STRSCPY | ||
29 | ssize_t __must_check strscpy(char *, const char *, size_t); | ||
30 | #endif | ||
28 | #ifndef __HAVE_ARCH_STRCAT | 31 | #ifndef __HAVE_ARCH_STRCAT |
29 | extern char * strcat(char *, const char *); | 32 | extern char * strcat(char *, const char *); |
30 | #endif | 33 | #endif |
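strscpy() always NUL-terminates and reports truncation, which strlcpy() does not do directly. An illustrative caller (not from this patch):

static void example_copy_name(char *dst, size_t dst_size, const char *src)
{
        ssize_t len = strscpy(dst, src, dst_size);      /* copied length, or -E2BIG on truncation */

        if (len < 0)
                pr_warn("name \"%s\" truncated\n", src);
}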
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 7591788e9fbf..357e44c1a46b 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h | |||
@@ -42,6 +42,7 @@ struct sock_xprt { | |||
42 | /* | 42 | /* |
43 | * Connection of transports | 43 | * Connection of transports |
44 | */ | 44 | */ |
45 | unsigned long sock_state; | ||
45 | struct delayed_work connect_worker; | 46 | struct delayed_work connect_worker; |
46 | struct sockaddr_storage srcaddr; | 47 | struct sockaddr_storage srcaddr; |
47 | unsigned short srcport; | 48 | unsigned short srcport; |
@@ -76,6 +77,8 @@ struct sock_xprt { | |||
76 | */ | 77 | */ |
77 | #define TCP_RPC_REPLY (1UL << 6) | 78 | #define TCP_RPC_REPLY (1UL << 6) |
78 | 79 | ||
80 | #define XPRT_SOCK_CONNECTING 1U | ||
81 | |||
79 | #endif /* __KERNEL__ */ | 82 | #endif /* __KERNEL__ */ |
80 | 83 | ||
81 | #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ | 84 | #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 17292fee8686..157d366e761b 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -360,7 +360,7 @@ static inline struct thermal_zone_device * | |||
360 | thermal_zone_of_sensor_register(struct device *dev, int id, void *data, | 360 | thermal_zone_of_sensor_register(struct device *dev, int id, void *data, |
361 | const struct thermal_zone_of_device_ops *ops) | 361 | const struct thermal_zone_of_device_ops *ops) |
362 | { | 362 | { |
363 | return NULL; | 363 | return ERR_PTR(-ENODEV); |
364 | } | 364 | } |
365 | 365 | ||
366 | static inline | 366 | static inline |
@@ -380,6 +380,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) | |||
380 | 380 | ||
381 | int power_actor_get_max_power(struct thermal_cooling_device *, | 381 | int power_actor_get_max_power(struct thermal_cooling_device *, |
382 | struct thermal_zone_device *tz, u32 *max_power); | 382 | struct thermal_zone_device *tz, u32 *max_power); |
383 | int power_actor_get_min_power(struct thermal_cooling_device *, | ||
384 | struct thermal_zone_device *tz, u32 *min_power); | ||
383 | int power_actor_set_power(struct thermal_cooling_device *, | 385 | int power_actor_set_power(struct thermal_cooling_device *, |
384 | struct thermal_instance *, u32); | 386 | struct thermal_instance *, u32); |
385 | struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, | 387 | struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, |
@@ -415,6 +417,10 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) | |||
415 | static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, | 417 | static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, |
416 | struct thermal_zone_device *tz, u32 *max_power) | 418 | struct thermal_zone_device *tz, u32 *max_power) |
417 | { return 0; } | 419 | { return 0; } |
420 | static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, | ||
421 | struct thermal_zone_device *tz, | ||
422 | u32 *min_power) | ||
423 | { return -ENODEV; } | ||
418 | static inline int power_actor_set_power(struct thermal_cooling_device *cdev, | 424 | static inline int power_actor_set_power(struct thermal_cooling_device *cdev, |
419 | struct thermal_instance *tz, u32 power) | 425 | struct thermal_instance *tz, u32 power) |
420 | { return 0; } | 426 | { return 0; } |
diff --git a/include/linux/tick.h b/include/linux/tick.h index 48d901f83f92..e312219ff823 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
@@ -147,11 +147,20 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) | |||
147 | cpumask_or(mask, mask, tick_nohz_full_mask); | 147 | cpumask_or(mask, mask, tick_nohz_full_mask); |
148 | } | 148 | } |
149 | 149 | ||
150 | static inline int housekeeping_any_cpu(void) | ||
151 | { | ||
152 | return cpumask_any_and(housekeeping_mask, cpu_online_mask); | ||
153 | } | ||
154 | |||
150 | extern void tick_nohz_full_kick(void); | 155 | extern void tick_nohz_full_kick(void); |
151 | extern void tick_nohz_full_kick_cpu(int cpu); | 156 | extern void tick_nohz_full_kick_cpu(int cpu); |
152 | extern void tick_nohz_full_kick_all(void); | 157 | extern void tick_nohz_full_kick_all(void); |
153 | extern void __tick_nohz_task_switch(void); | 158 | extern void __tick_nohz_task_switch(void); |
154 | #else | 159 | #else |
160 | static inline int housekeeping_any_cpu(void) | ||
161 | { | ||
162 | return smp_processor_id(); | ||
163 | } | ||
155 | static inline bool tick_nohz_full_enabled(void) { return false; } | 164 | static inline bool tick_nohz_full_enabled(void) { return false; } |
156 | static inline bool tick_nohz_full_cpu(int cpu) { return false; } | 165 | static inline bool tick_nohz_full_cpu(int cpu) { return false; } |
157 | static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } | 166 | static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } |
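housekeeping_any_cpu() gives callers a CPU that is allowed to do background work; with NO_HZ_FULL disabled the stub simply returns the current CPU. A hypothetical caller sketch (names assumed, not from this patch):

static void example_queue_housekeeping_work(struct work_struct *work)
{
        /* steer deferrable work away from nohz_full CPUs */
        queue_work_on(housekeeping_any_cpu(), system_wq, work);
}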
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 3dd5a781da99..bfb74723f151 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h | |||
@@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param { | |||
157 | */ | 157 | */ |
158 | int pio_dma_border; /* default is 64byte */ | 158 | int pio_dma_border; /* default is 64byte */ |
159 | 159 | ||
160 | u32 type; | 160 | uintptr_t type; |
161 | u32 enable_gpio; | 161 | u32 enable_gpio; |
162 | 162 | ||
163 | /* | 163 | /* |
diff --git a/include/linux/wait.h b/include/linux/wait.h index d3d077228d4c..1e1bf9f963a9 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -147,8 +147,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) | |||
147 | 147 | ||
148 | typedef int wait_bit_action_f(struct wait_bit_key *); | 148 | typedef int wait_bit_action_f(struct wait_bit_key *); |
149 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 149 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
150 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, | 150 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
151 | void *key); | ||
152 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 151 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
153 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); | 152 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); |
154 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); | 153 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); |
@@ -180,7 +179,7 @@ wait_queue_head_t *bit_waitqueue(void *, int); | |||
180 | #define wake_up_poll(x, m) \ | 179 | #define wake_up_poll(x, m) \ |
181 | __wake_up(x, TASK_NORMAL, 1, (void *) (m)) | 180 | __wake_up(x, TASK_NORMAL, 1, (void *) (m)) |
182 | #define wake_up_locked_poll(x, m) \ | 181 | #define wake_up_locked_poll(x, m) \ |
183 | __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m)) | 182 | __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) |
184 | #define wake_up_interruptible_poll(x, m) \ | 183 | #define wake_up_interruptible_poll(x, m) \ |
185 | __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) | 184 | __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) |
186 | #define wake_up_interruptible_sync_poll(x, m) \ | 185 | #define wake_up_interruptible_sync_poll(x, m) \ |
diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 4a167b30a12f..cb1b9bbda332 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h | |||
@@ -63,7 +63,11 @@ struct unix_sock { | |||
63 | #define UNIX_GC_MAYBE_CYCLE 1 | 63 | #define UNIX_GC_MAYBE_CYCLE 1 |
64 | struct socket_wq peer_wq; | 64 | struct socket_wq peer_wq; |
65 | }; | 65 | }; |
66 | #define unix_sk(__sk) ((struct unix_sock *)__sk) | 66 | |
67 | static inline struct unix_sock *unix_sk(struct sock *sk) | ||
68 | { | ||
69 | return (struct unix_sock *)sk; | ||
70 | } | ||
67 | 71 | ||
68 | #define peer_wait peer_wq.wait | 72 | #define peer_wait peer_wq.wait |
69 | 73 | ||
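Converting the unix_sk() cast macro into a static inline adds argument type checking with no behavioural change. An illustrative caller (hypothetical, not from this patch):

static void example_check(struct sock *sk)
{
        struct unix_sock *u = unix_sk(sk);      /* fine: argument is a struct sock * */

        (void)u;
        /* unix_sk(some_skb) would now draw an incompatible-pointer warning
         * instead of being silently cast as the old macro did. */
}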
diff --git a/include/net/flow.h b/include/net/flow.h index acd6a096250e..9b85db85f13c 100644 --- a/include/net/flow.h +++ b/include/net/flow.h | |||
@@ -35,6 +35,7 @@ struct flowi_common { | |||
35 | #define FLOWI_FLAG_ANYSRC 0x01 | 35 | #define FLOWI_FLAG_ANYSRC 0x01 |
36 | #define FLOWI_FLAG_KNOWN_NH 0x02 | 36 | #define FLOWI_FLAG_KNOWN_NH 0x02 |
37 | #define FLOWI_FLAG_VRFSRC 0x04 | 37 | #define FLOWI_FLAG_VRFSRC 0x04 |
38 | #define FLOWI_FLAG_SKIP_NH_OIF 0x08 | ||
38 | __u32 flowic_secid; | 39 | __u32 flowic_secid; |
39 | struct flowi_tunnel flowic_tun_key; | 40 | struct flowi_tunnel flowic_tun_key; |
40 | }; | 41 | }; |
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 879d6e5a973b..186f3a1e1b1f 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h | |||
@@ -110,7 +110,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, | |||
110 | void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, | 110 | void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, |
111 | struct inet_hashinfo *hashinfo); | 111 | struct inet_hashinfo *hashinfo); |
112 | 112 | ||
113 | void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo); | 113 | void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, |
114 | bool rearm); | ||
115 | |||
116 | static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) | ||
117 | { | ||
118 | __inet_twsk_schedule(tw, timeo, false); | ||
119 | } | ||
120 | |||
121 | static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo) | ||
122 | { | ||
123 | __inet_twsk_schedule(tw, timeo, true); | ||
124 | } | ||
125 | |||
114 | void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); | 126 | void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); |
115 | 127 | ||
116 | void inet_twsk_purge(struct inet_hashinfo *hashinfo, | 128 | void inet_twsk_purge(struct inet_hashinfo *hashinfo, |
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 063d30474cf6..aaf9700fc9e5 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h | |||
@@ -275,7 +275,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, | |||
275 | struct nl_info *info, struct mx6_config *mxc); | 275 | struct nl_info *info, struct mx6_config *mxc); |
276 | int fib6_del(struct rt6_info *rt, struct nl_info *info); | 276 | int fib6_del(struct rt6_info *rt, struct nl_info *info); |
277 | 277 | ||
278 | void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info); | 278 | void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, |
279 | unsigned int flags); | ||
279 | 280 | ||
280 | void fib6_run_gc(unsigned long expires, struct net *net, bool force); | 281 | void fib6_run_gc(unsigned long expires, struct net *net, bool force); |
281 | 282 | ||
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index b8529aa1dae7..fa915fa0f703 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h | |||
@@ -32,6 +32,12 @@ struct __ip6_tnl_parm { | |||
32 | __be32 o_key; | 32 | __be32 o_key; |
33 | }; | 33 | }; |
34 | 34 | ||
35 | struct ip6_tnl_dst { | ||
36 | seqlock_t lock; | ||
37 | struct dst_entry __rcu *dst; | ||
38 | u32 cookie; | ||
39 | }; | ||
40 | |||
35 | /* IPv6 tunnel */ | 41 | /* IPv6 tunnel */ |
36 | struct ip6_tnl { | 42 | struct ip6_tnl { |
37 | struct ip6_tnl __rcu *next; /* next tunnel in list */ | 43 | struct ip6_tnl __rcu *next; /* next tunnel in list */ |
@@ -39,8 +45,7 @@ struct ip6_tnl { | |||
39 | struct net *net; /* netns for packet i/o */ | 45 | struct net *net; /* netns for packet i/o */ |
40 | struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ | 46 | struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ |
41 | struct flowi fl; /* flowi template for xmit */ | 47 | struct flowi fl; /* flowi template for xmit */ |
42 | struct dst_entry *dst_cache; /* cached dst */ | 48 | struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */ |
43 | u32 dst_cookie; | ||
44 | 49 | ||
45 | int err_count; | 50 | int err_count; |
46 | unsigned long err_time; | 51 | unsigned long err_time; |
@@ -60,9 +65,11 @@ struct ipv6_tlv_tnl_enc_lim { | |||
60 | __u8 encap_limit; /* tunnel encapsulation limit */ | 65 | __u8 encap_limit; /* tunnel encapsulation limit */ |
61 | } __packed; | 66 | } __packed; |
62 | 67 | ||
63 | struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t); | 68 | struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t); |
69 | int ip6_tnl_dst_init(struct ip6_tnl *t); | ||
70 | void ip6_tnl_dst_destroy(struct ip6_tnl *t); | ||
64 | void ip6_tnl_dst_reset(struct ip6_tnl *t); | 71 | void ip6_tnl_dst_reset(struct ip6_tnl *t); |
65 | void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); | 72 | void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst); |
66 | int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, | 73 | int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, |
67 | const struct in6_addr *raddr); | 74 | const struct in6_addr *raddr); |
68 | int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, | 75 | int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, |
@@ -79,7 +86,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, | |||
79 | struct net_device_stats *stats = &dev->stats; | 86 | struct net_device_stats *stats = &dev->stats; |
80 | int pkt_len, err; | 87 | int pkt_len, err; |
81 | 88 | ||
82 | pkt_len = skb->len; | 89 | pkt_len = skb->len - skb_inner_network_offset(skb); |
83 | err = ip6_local_out_sk(sk, skb); | 90 | err = ip6_local_out_sk(sk, skb); |
84 | 91 | ||
85 | if (net_xmit_eval(err) == 0) { | 92 | if (net_xmit_eval(err) == 0) { |
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index a37d0432bebd..727d6e9a9685 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -236,8 +236,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp, | |||
236 | rcu_read_lock(); | 236 | rcu_read_lock(); |
237 | 237 | ||
238 | tb = fib_get_table(net, RT_TABLE_MAIN); | 238 | tb = fib_get_table(net, RT_TABLE_MAIN); |
239 | if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF)) | 239 | if (tb) |
240 | err = 0; | 240 | err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF); |
241 | |||
242 | if (err == -EAGAIN) | ||
243 | err = -ENETUNREACH; | ||
241 | 244 | ||
242 | rcu_read_unlock(); | 245 | rcu_read_unlock(); |
243 | 246 | ||
@@ -258,7 +261,7 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp, | |||
258 | struct fib_result *res, unsigned int flags) | 261 | struct fib_result *res, unsigned int flags) |
259 | { | 262 | { |
260 | struct fib_table *tb; | 263 | struct fib_table *tb; |
261 | int err; | 264 | int err = -ENETUNREACH; |
262 | 265 | ||
263 | flags |= FIB_LOOKUP_NOREF; | 266 | flags |= FIB_LOOKUP_NOREF; |
264 | if (net->ipv4.fib_has_custom_rules) | 267 | if (net->ipv4.fib_has_custom_rules) |
@@ -268,15 +271,20 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp, | |||
268 | 271 | ||
269 | res->tclassid = 0; | 272 | res->tclassid = 0; |
270 | 273 | ||
271 | for (err = 0; !err; err = -ENETUNREACH) { | 274 | tb = rcu_dereference_rtnl(net->ipv4.fib_main); |
272 | tb = rcu_dereference_rtnl(net->ipv4.fib_main); | 275 | if (tb) |
273 | if (tb && !fib_table_lookup(tb, flp, res, flags)) | 276 | err = fib_table_lookup(tb, flp, res, flags); |
274 | break; | 277 | |
278 | if (!err) | ||
279 | goto out; | ||
280 | |||
281 | tb = rcu_dereference_rtnl(net->ipv4.fib_default); | ||
282 | if (tb) | ||
283 | err = fib_table_lookup(tb, flp, res, flags); | ||
275 | 284 | ||
276 | tb = rcu_dereference_rtnl(net->ipv4.fib_default); | 285 | out: |
277 | if (tb && !fib_table_lookup(tb, flp, res, flags)) | 286 | if (err == -EAGAIN) |
278 | break; | 287 | err = -ENETUNREACH; |
279 | } | ||
280 | 288 | ||
281 | rcu_read_unlock(); | 289 | rcu_read_unlock(); |
282 | 290 | ||
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 9a6a3ba888e8..f6dafec9102c 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h | |||
@@ -276,6 +276,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); | |||
276 | int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, | 276 | int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, |
277 | __be32 src, __be32 dst, u8 proto, | 277 | __be32 src, __be32 dst, u8 proto, |
278 | u8 tos, u8 ttl, __be16 df, bool xnet); | 278 | u8 tos, u8 ttl, __be16 df, bool xnet); |
279 | struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, | ||
280 | gfp_t flags); | ||
279 | 281 | ||
280 | struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, | 282 | struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, |
281 | int gso_type_mask); | 283 | int gso_type_mask); |
diff --git a/include/net/route.h b/include/net/route.h index cc61cb95f059..f46af256880c 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
@@ -255,7 +255,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 | |||
255 | flow_flags |= FLOWI_FLAG_ANYSRC; | 255 | flow_flags |= FLOWI_FLAG_ANYSRC; |
256 | 256 | ||
257 | if (netif_index_is_vrf(sock_net(sk), oif)) | 257 | if (netif_index_is_vrf(sock_net(sk), oif)) |
258 | flow_flags |= FLOWI_FLAG_VRFSRC; | 258 | flow_flags |= FLOWI_FLAG_VRFSRC | FLOWI_FLAG_SKIP_NH_OIF; |
259 | 259 | ||
260 | flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, | 260 | flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, |
261 | protocol, flow_flags, dst, src, dport, sport); | 261 | protocol, flow_flags, dst, src, dport, sport); |
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index 391dae1931c0..a0fa975cd1c1 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h | |||
@@ -294,8 +294,8 @@ struct opa_port_states { | |||
294 | 294 | ||
295 | struct opa_port_state_info { | 295 | struct opa_port_state_info { |
296 | struct opa_port_states port_states; | 296 | struct opa_port_states port_states; |
297 | u16 link_width_downgrade_tx_active; | 297 | __be16 link_width_downgrade_tx_active; |
298 | u16 link_width_downgrade_rx_active; | 298 | __be16 link_width_downgrade_rx_active; |
299 | }; | 299 | }; |
300 | 300 | ||
301 | struct opa_port_info { | 301 | struct opa_port_info { |
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h index 898be3a8db9a..6d8f8fba3341 100644 --- a/include/sound/wm8904.h +++ b/include/sound/wm8904.h | |||
@@ -119,7 +119,7 @@ | |||
119 | #define WM8904_MIC_REGS 2 | 119 | #define WM8904_MIC_REGS 2 |
120 | #define WM8904_GPIO_REGS 4 | 120 | #define WM8904_GPIO_REGS 4 |
121 | #define WM8904_DRC_REGS 4 | 121 | #define WM8904_DRC_REGS 4 |
122 | #define WM8904_EQ_REGS 25 | 122 | #define WM8904_EQ_REGS 24 |
123 | 123 | ||
124 | /** | 124 | /** |
125 | * DRC configurations are specified with a label and a set of register | 125 | * DRC configurations are specified with a label and a set of register |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index ac9bf1c0e42d..5f48754dc36a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -730,6 +730,7 @@ struct se_device { | |||
730 | #define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 | 730 | #define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 |
731 | #define DF_USING_UDEV_PATH 0x00000008 | 731 | #define DF_USING_UDEV_PATH 0x00000008 |
732 | #define DF_USING_ALIAS 0x00000010 | 732 | #define DF_USING_ALIAS 0x00000010 |
733 | #define DF_READ_ONLY 0x00000020 | ||
733 | /* Physical device queue depth */ | 734 | /* Physical device queue depth */ |
734 | u32 queue_depth; | 735 | u32 queue_depth; |
735 | /* Used for SPC-2 reservations enforce of ISIDs */ | 736 | /* Used for SPC-2 reservations enforce of ISIDs */ |
diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h index 9df61f1edb0f..3094618d382f 100644 --- a/include/uapi/asm-generic/signal.h +++ b/include/uapi/asm-generic/signal.h | |||
@@ -80,8 +80,10 @@ | |||
80 | * SA_RESTORER 0x04000000 | 80 | * SA_RESTORER 0x04000000 |
81 | */ | 81 | */ |
82 | 82 | ||
83 | #if !defined MINSIGSTKSZ || !defined SIGSTKSZ | ||
83 | #define MINSIGSTKSZ 2048 | 84 | #define MINSIGSTKSZ 2048 |
84 | #define SIGSTKSZ 8192 | 85 | #define SIGSTKSZ 8192 |
86 | #endif | ||
85 | 87 | ||
86 | #ifndef __ASSEMBLY__ | 88 | #ifndef __ASSEMBLY__ |
87 | typedef struct { | 89 | typedef struct { |
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 8da542a2874d..ee124009e12a 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h | |||
@@ -709,17 +709,19 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create) | |||
709 | __SYSCALL(__NR_bpf, sys_bpf) | 709 | __SYSCALL(__NR_bpf, sys_bpf) |
710 | #define __NR_execveat 281 | 710 | #define __NR_execveat 281 |
711 | __SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) | 711 | __SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) |
712 | #define __NR_membarrier 282 | 712 | #define __NR_userfaultfd 282 |
713 | __SYSCALL(__NR_userfaultfd, sys_userfaultfd) | ||
714 | #define __NR_membarrier 283 | ||
713 | __SYSCALL(__NR_membarrier, sys_membarrier) | 715 | __SYSCALL(__NR_membarrier, sys_membarrier) |
714 | 716 | ||
715 | #undef __NR_syscalls | 717 | #undef __NR_syscalls |
716 | #define __NR_syscalls 283 | 718 | #define __NR_syscalls 284 |
717 | 719 | ||
718 | /* | 720 | /* |
719 | * All syscalls below here should go away really, | 721 | * All syscalls below here should go away really, |
720 | * these are provided for both review and as a porting | 722 | * these are provided for both review and as a porting |
721 | * help for the C library version. | 723 | * help for the C library version. |
722 | * | 724 | * |
723 | * Last chance: are any of these important enough to | 725 | * Last chance: are any of these important enough to |
724 | * enable by default? | 726 | * enable by default? |
725 | */ | 727 | */ |
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h index 34141a5dfe74..f8b01887a495 100644 --- a/include/uapi/linux/lwtunnel.h +++ b/include/uapi/linux/lwtunnel.h | |||
@@ -21,8 +21,6 @@ enum lwtunnel_ip_t { | |||
21 | LWTUNNEL_IP_SRC, | 21 | LWTUNNEL_IP_SRC, |
22 | LWTUNNEL_IP_TTL, | 22 | LWTUNNEL_IP_TTL, |
23 | LWTUNNEL_IP_TOS, | 23 | LWTUNNEL_IP_TOS, |
24 | LWTUNNEL_IP_SPORT, | ||
25 | LWTUNNEL_IP_DPORT, | ||
26 | LWTUNNEL_IP_FLAGS, | 24 | LWTUNNEL_IP_FLAGS, |
27 | __LWTUNNEL_IP_MAX, | 25 | __LWTUNNEL_IP_MAX, |
28 | }; | 26 | }; |
@@ -36,8 +34,6 @@ enum lwtunnel_ip6_t { | |||
36 | LWTUNNEL_IP6_SRC, | 34 | LWTUNNEL_IP6_SRC, |
37 | LWTUNNEL_IP6_HOPLIMIT, | 35 | LWTUNNEL_IP6_HOPLIMIT, |
38 | LWTUNNEL_IP6_TC, | 36 | LWTUNNEL_IP6_TC, |
39 | LWTUNNEL_IP6_SPORT, | ||
40 | LWTUNNEL_IP6_DPORT, | ||
41 | LWTUNNEL_IP6_FLAGS, | 37 | LWTUNNEL_IP6_FLAGS, |
42 | __LWTUNNEL_IP6_MAX, | 38 | __LWTUNNEL_IP6_MAX, |
43 | }; | 39 | }; |
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index df0e09bb7dd5..9057d7af3ae1 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h | |||
@@ -11,8 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | 13 | ||
14 | #include <linux/compiler.h> | ||
15 | |||
16 | #define UFFD_API ((__u64)0xAA) | 14 | #define UFFD_API ((__u64)0xAA) |
17 | /* | 15 | /* |
18 | * After implementing the respective features it will become: | 16 | * After implementing the respective features it will become: |
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h index 9ce083960a25..f18490985fc8 100644 --- a/include/xen/interface/sched.h +++ b/include/xen/interface/sched.h | |||
@@ -107,5 +107,13 @@ struct sched_watchdog { | |||
107 | #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ | 107 | #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ |
108 | #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ | 108 | #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ |
109 | #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ | 109 | #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ |
110 | /* | ||
111 | * The domain has asked for a 'soft reset'. The expected behavior is to | ||
112 | * reset internal Xen state for the domain, returning it to the point where | ||
113 | * it was created but leaving the domain's memory contents and vCPU contexts | ||
114 | * intact. This will allow the domain to start over and set up all Xen specific | ||
115 | * interfaces again. | ||
116 | */ | ||
117 | #define SHUTDOWN_soft_reset 5 | ||
110 | 118 | ||
111 | #endif /* __XEN_PUBLIC_SCHED_H__ */ | 119 | #endif /* __XEN_PUBLIC_SCHED_H__ */ |
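A hedged sketch of how a guest could request the new reason (assumed call site, not part of this patch; HYPERVISOR_sched_op() and struct sched_shutdown come from the existing Xen interface headers):

static void example_request_soft_reset(void)
{
        struct sched_shutdown r = { .reason = SHUTDOWN_soft_reset };

        HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
}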
@@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) | |||
137 | return retval; | 137 | return retval; |
138 | } | 138 | } |
139 | 139 | ||
140 | /* ipc_addid() locks msq upon success. */ | ||
141 | id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); | ||
142 | if (id < 0) { | ||
143 | ipc_rcu_putref(msq, msg_rcu_free); | ||
144 | return id; | ||
145 | } | ||
146 | |||
147 | msq->q_stime = msq->q_rtime = 0; | 140 | msq->q_stime = msq->q_rtime = 0; |
148 | msq->q_ctime = get_seconds(); | 141 | msq->q_ctime = get_seconds(); |
149 | msq->q_cbytes = msq->q_qnum = 0; | 142 | msq->q_cbytes = msq->q_qnum = 0; |
@@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) | |||
153 | INIT_LIST_HEAD(&msq->q_receivers); | 146 | INIT_LIST_HEAD(&msq->q_receivers); |
154 | INIT_LIST_HEAD(&msq->q_senders); | 147 | INIT_LIST_HEAD(&msq->q_senders); |
155 | 148 | ||
149 | /* ipc_addid() locks msq upon success. */ | ||
150 | id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); | ||
151 | if (id < 0) { | ||
152 | ipc_rcu_putref(msq, msg_rcu_free); | ||
153 | return id; | ||
154 | } | ||
155 | |||
156 | ipc_unlock_object(&msq->q_perm); | 156 | ipc_unlock_object(&msq->q_perm); |
157 | rcu_read_unlock(); | 157 | rcu_read_unlock(); |
158 | 158 | ||
@@ -551,12 +551,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
551 | if (IS_ERR(file)) | 551 | if (IS_ERR(file)) |
552 | goto no_file; | 552 | goto no_file; |
553 | 553 | ||
554 | id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); | ||
555 | if (id < 0) { | ||
556 | error = id; | ||
557 | goto no_id; | ||
558 | } | ||
559 | |||
560 | shp->shm_cprid = task_tgid_vnr(current); | 554 | shp->shm_cprid = task_tgid_vnr(current); |
561 | shp->shm_lprid = 0; | 555 | shp->shm_lprid = 0; |
562 | shp->shm_atim = shp->shm_dtim = 0; | 556 | shp->shm_atim = shp->shm_dtim = 0; |
@@ -565,6 +559,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
565 | shp->shm_nattch = 0; | 559 | shp->shm_nattch = 0; |
566 | shp->shm_file = file; | 560 | shp->shm_file = file; |
567 | shp->shm_creator = current; | 561 | shp->shm_creator = current; |
562 | |||
563 | id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); | ||
564 | if (id < 0) { | ||
565 | error = id; | ||
566 | goto no_id; | ||
567 | } | ||
568 | |||
568 | list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist); | 569 | list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist); |
569 | 570 | ||
570 | /* | 571 | /* |
diff --git a/ipc/util.c b/ipc/util.c index be4230020a1f..0f401d94b7c6 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
@@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) | |||
237 | rcu_read_lock(); | 237 | rcu_read_lock(); |
238 | spin_lock(&new->lock); | 238 | spin_lock(&new->lock); |
239 | 239 | ||
240 | current_euid_egid(&euid, &egid); | ||
241 | new->cuid = new->uid = euid; | ||
242 | new->gid = new->cgid = egid; | ||
243 | |||
240 | id = idr_alloc(&ids->ipcs_idr, new, | 244 | id = idr_alloc(&ids->ipcs_idr, new, |
241 | (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, | 245 | (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, |
242 | GFP_NOWAIT); | 246 | GFP_NOWAIT); |
@@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) | |||
249 | 253 | ||
250 | ids->in_use++; | 254 | ids->in_use++; |
251 | 255 | ||
252 | current_euid_egid(&euid, &egid); | ||
253 | new->cuid = new->uid = euid; | ||
254 | new->gid = new->cgid = egid; | ||
255 | |||
256 | if (next_id < 0) { | 256 | if (next_id < 0) { |
257 | new->seq = ids->seq++; | 257 | new->seq = ids->seq++; |
258 | if (ids->seq > IPCID_SEQ_MAX) | 258 | if (ids->seq > IPCID_SEQ_MAX) |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2cf0f79f1fc9..2c9eae6ad970 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
47 | #include <linux/spinlock.h> | 47 | #include <linux/spinlock.h> |
48 | #include <linux/rwsem.h> | 48 | #include <linux/rwsem.h> |
49 | #include <linux/percpu-rwsem.h> | ||
50 | #include <linux/string.h> | 49 | #include <linux/string.h> |
51 | #include <linux/sort.h> | 50 | #include <linux/sort.h> |
52 | #include <linux/kmod.h> | 51 | #include <linux/kmod.h> |
@@ -104,8 +103,6 @@ static DEFINE_SPINLOCK(cgroup_idr_lock); | |||
104 | */ | 103 | */ |
105 | static DEFINE_SPINLOCK(release_agent_path_lock); | 104 | static DEFINE_SPINLOCK(release_agent_path_lock); |
106 | 105 | ||
107 | struct percpu_rw_semaphore cgroup_threadgroup_rwsem; | ||
108 | |||
109 | #define cgroup_assert_mutex_or_rcu_locked() \ | 106 | #define cgroup_assert_mutex_or_rcu_locked() \ |
110 | RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ | 107 | RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ |
111 | !lockdep_is_held(&cgroup_mutex), \ | 108 | !lockdep_is_held(&cgroup_mutex), \ |
@@ -874,6 +871,48 @@ static struct css_set *find_css_set(struct css_set *old_cset, | |||
874 | return cset; | 871 | return cset; |
875 | } | 872 | } |
876 | 873 | ||
874 | void cgroup_threadgroup_change_begin(struct task_struct *tsk) | ||
875 | { | ||
876 | down_read(&tsk->signal->group_rwsem); | ||
877 | } | ||
878 | |||
879 | void cgroup_threadgroup_change_end(struct task_struct *tsk) | ||
880 | { | ||
881 | up_read(&tsk->signal->group_rwsem); | ||
882 | } | ||
883 | |||
884 | /** | ||
885 | * threadgroup_lock - lock threadgroup | ||
886 | * @tsk: member task of the threadgroup to lock | ||
887 | * | ||
888 | * Lock the threadgroup @tsk belongs to. No new task is allowed to enter | ||
889 | * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or | ||
890 | * change ->group_leader/pid. This is useful for cases where the threadgroup | ||
891 | * needs to stay stable across blockable operations. | ||
892 | * | ||
893 | * fork and exit explicitly call threadgroup_change_{begin|end}() for | ||
894 | * synchronization. While held, no new task will be added to threadgroup | ||
895 | * and no existing live task will have its PF_EXITING set. | ||
896 | * | ||
897 | * de_thread() does threadgroup_change_{begin|end}() when a non-leader | ||
898 | * sub-thread becomes a new leader. | ||
899 | */ | ||
900 | static void threadgroup_lock(struct task_struct *tsk) | ||
901 | { | ||
902 | down_write(&tsk->signal->group_rwsem); | ||
903 | } | ||
904 | |||
905 | /** | ||
906 | * threadgroup_unlock - unlock threadgroup | ||
907 | * @tsk: member task of the threadgroup to unlock | ||
908 | * | ||
909 | * Reverse threadgroup_lock(). | ||
910 | */ | ||
911 | static inline void threadgroup_unlock(struct task_struct *tsk) | ||
912 | { | ||
913 | up_write(&tsk->signal->group_rwsem); | ||
914 | } | ||
915 | |||
877 | static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) | 916 | static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) |
878 | { | 917 | { |
879 | struct cgroup *root_cgrp = kf_root->kn->priv; | 918 | struct cgroup *root_cgrp = kf_root->kn->priv; |
@@ -2074,9 +2113,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, | |||
2074 | lockdep_assert_held(&css_set_rwsem); | 2113 | lockdep_assert_held(&css_set_rwsem); |
2075 | 2114 | ||
2076 | /* | 2115 | /* |
2077 | * We are synchronized through cgroup_threadgroup_rwsem against | 2116 | * We are synchronized through threadgroup_lock() against PF_EXITING |
2078 | * PF_EXITING setting such that we can't race against cgroup_exit() | 2117 | * setting such that we can't race against cgroup_exit() changing the |
2079 | * changing the css_set to init_css_set and dropping the old one. | 2118 | * css_set to init_css_set and dropping the old one. |
2080 | */ | 2119 | */ |
2081 | WARN_ON_ONCE(tsk->flags & PF_EXITING); | 2120 | WARN_ON_ONCE(tsk->flags & PF_EXITING); |
2082 | old_cset = task_css_set(tsk); | 2121 | old_cset = task_css_set(tsk); |
@@ -2133,11 +2172,10 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets) | |||
2133 | * @src_cset and add it to @preloaded_csets, which should later be cleaned | 2172 | * @src_cset and add it to @preloaded_csets, which should later be cleaned |
2134 | * up by cgroup_migrate_finish(). | 2173 | * up by cgroup_migrate_finish(). |
2135 | * | 2174 | * |
2136 | * This function may be called without holding cgroup_threadgroup_rwsem | 2175 | * This function may be called without holding threadgroup_lock even if the |
2137 | * even if the target is a process. Threads may be created and destroyed | 2176 | * target is a process. Threads may be created and destroyed but as long |
2138 | * but as long as cgroup_mutex is not dropped, no new css_set can be put | 2177 | * as cgroup_mutex is not dropped, no new css_set can be put into play and |
2139 | * into play and the preloaded css_sets are guaranteed to cover all | 2178 | * the preloaded css_sets are guaranteed to cover all migrations. |
2140 | * migrations. | ||
2141 | */ | 2179 | */ |
2142 | static void cgroup_migrate_add_src(struct css_set *src_cset, | 2180 | static void cgroup_migrate_add_src(struct css_set *src_cset, |
2143 | struct cgroup *dst_cgrp, | 2181 | struct cgroup *dst_cgrp, |
@@ -2240,7 +2278,7 @@ err: | |||
2240 | * @threadgroup: whether @leader points to the whole process or a single task | 2278 | * @threadgroup: whether @leader points to the whole process or a single task |
2241 | * | 2279 | * |
2242 | * Migrate a process or task denoted by @leader to @cgrp. If migrating a | 2280 | * Migrate a process or task denoted by @leader to @cgrp. If migrating a |
2243 | * process, the caller must be holding cgroup_threadgroup_rwsem. The | 2281 | * process, the caller must be holding threadgroup_lock of @leader. The |
2244 | * caller is also responsible for invoking cgroup_migrate_add_src() and | 2282 | * caller is also responsible for invoking cgroup_migrate_add_src() and |
2245 | * cgroup_migrate_prepare_dst() on the targets before invoking this | 2283 | * cgroup_migrate_prepare_dst() on the targets before invoking this |
2246 | * function and following up with cgroup_migrate_finish(). | 2284 | * function and following up with cgroup_migrate_finish(). |
@@ -2368,7 +2406,7 @@ out_release_tset: | |||
2368 | * @leader: the task or the leader of the threadgroup to be attached | 2406 | * @leader: the task or the leader of the threadgroup to be attached |
2369 | * @threadgroup: attach the whole threadgroup? | 2407 | * @threadgroup: attach the whole threadgroup? |
2370 | * | 2408 | * |
2371 | * Call holding cgroup_mutex and cgroup_threadgroup_rwsem. | 2409 | * Call holding cgroup_mutex and threadgroup_lock of @leader. |
2372 | */ | 2410 | */ |
2373 | static int cgroup_attach_task(struct cgroup *dst_cgrp, | 2411 | static int cgroup_attach_task(struct cgroup *dst_cgrp, |
2374 | struct task_struct *leader, bool threadgroup) | 2412 | struct task_struct *leader, bool threadgroup) |
@@ -2460,13 +2498,14 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, | |||
2460 | if (!cgrp) | 2498 | if (!cgrp) |
2461 | return -ENODEV; | 2499 | return -ENODEV; |
2462 | 2500 | ||
2463 | percpu_down_write(&cgroup_threadgroup_rwsem); | 2501 | retry_find_task: |
2464 | rcu_read_lock(); | 2502 | rcu_read_lock(); |
2465 | if (pid) { | 2503 | if (pid) { |
2466 | tsk = find_task_by_vpid(pid); | 2504 | tsk = find_task_by_vpid(pid); |
2467 | if (!tsk) { | 2505 | if (!tsk) { |
2506 | rcu_read_unlock(); | ||
2468 | ret = -ESRCH; | 2507 | ret = -ESRCH; |
2469 | goto out_unlock_rcu; | 2508 | goto out_unlock_cgroup; |
2470 | } | 2509 | } |
2471 | } else { | 2510 | } else { |
2472 | tsk = current; | 2511 | tsk = current; |
@@ -2482,23 +2521,37 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, | |||
2482 | */ | 2521 | */ |
2483 | if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { | 2522 | if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { |
2484 | ret = -EINVAL; | 2523 | ret = -EINVAL; |
2485 | goto out_unlock_rcu; | 2524 | rcu_read_unlock(); |
2525 | goto out_unlock_cgroup; | ||
2486 | } | 2526 | } |
2487 | 2527 | ||
2488 | get_task_struct(tsk); | 2528 | get_task_struct(tsk); |
2489 | rcu_read_unlock(); | 2529 | rcu_read_unlock(); |
2490 | 2530 | ||
2531 | threadgroup_lock(tsk); | ||
2532 | if (threadgroup) { | ||
2533 | if (!thread_group_leader(tsk)) { | ||
2534 | /* | ||
2535 | * a race with de_thread from another thread's exec() | ||
2536 | * may strip us of our leadership; if this happens, | ||
2537 | * there is no choice but to throw this task away and | ||
2538 | * try again; this is | ||
2539 | * "double-double-toil-and-trouble-check locking". | ||
2540 | */ | ||
2541 | threadgroup_unlock(tsk); | ||
2542 | put_task_struct(tsk); | ||
2543 | goto retry_find_task; | ||
2544 | } | ||
2545 | } | ||
2546 | |||
2491 | ret = cgroup_procs_write_permission(tsk, cgrp, of); | 2547 | ret = cgroup_procs_write_permission(tsk, cgrp, of); |
2492 | if (!ret) | 2548 | if (!ret) |
2493 | ret = cgroup_attach_task(cgrp, tsk, threadgroup); | 2549 | ret = cgroup_attach_task(cgrp, tsk, threadgroup); |
2494 | 2550 | ||
2495 | put_task_struct(tsk); | 2551 | threadgroup_unlock(tsk); |
2496 | goto out_unlock_threadgroup; | ||
2497 | 2552 | ||
2498 | out_unlock_rcu: | 2553 | put_task_struct(tsk); |
2499 | rcu_read_unlock(); | 2554 | out_unlock_cgroup: |
2500 | out_unlock_threadgroup: | ||
2501 | percpu_up_write(&cgroup_threadgroup_rwsem); | ||
2502 | cgroup_kn_unlock(of->kn); | 2555 | cgroup_kn_unlock(of->kn); |
2503 | return ret ?: nbytes; | 2556 | return ret ?: nbytes; |
2504 | } | 2557 | } |
@@ -2643,8 +2696,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) | |||
2643 | 2696 | ||
2644 | lockdep_assert_held(&cgroup_mutex); | 2697 | lockdep_assert_held(&cgroup_mutex); |
2645 | 2698 | ||
2646 | percpu_down_write(&cgroup_threadgroup_rwsem); | ||
2647 | |||
2648 | /* look up all csses currently attached to @cgrp's subtree */ | 2699 | /* look up all csses currently attached to @cgrp's subtree */ |
2649 | down_read(&css_set_rwsem); | 2700 | down_read(&css_set_rwsem); |
2650 | css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) { | 2701 | css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) { |
@@ -2700,8 +2751,17 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) | |||
2700 | goto out_finish; | 2751 | goto out_finish; |
2701 | last_task = task; | 2752 | last_task = task; |
2702 | 2753 | ||
2754 | threadgroup_lock(task); | ||
2755 | /* raced against de_thread() from another thread? */ | ||
2756 | if (!thread_group_leader(task)) { | ||
2757 | threadgroup_unlock(task); | ||
2758 | put_task_struct(task); | ||
2759 | continue; | ||
2760 | } | ||
2761 | |||
2703 | ret = cgroup_migrate(src_cset->dfl_cgrp, task, true); | 2762 | ret = cgroup_migrate(src_cset->dfl_cgrp, task, true); |
2704 | 2763 | ||
2764 | threadgroup_unlock(task); | ||
2705 | put_task_struct(task); | 2765 | put_task_struct(task); |
2706 | 2766 | ||
2707 | if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret)) | 2767 | if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret)) |
@@ -2711,7 +2771,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) | |||
2711 | 2771 | ||
2712 | out_finish: | 2772 | out_finish: |
2713 | cgroup_migrate_finish(&preloaded_csets); | 2773 | cgroup_migrate_finish(&preloaded_csets); |
2714 | percpu_up_write(&cgroup_threadgroup_rwsem); | ||
2715 | return ret; | 2774 | return ret; |
2716 | } | 2775 | } |
2717 | 2776 | ||
@@ -5024,7 +5083,6 @@ int __init cgroup_init(void) | |||
5024 | unsigned long key; | 5083 | unsigned long key; |
5025 | int ssid, err; | 5084 | int ssid, err; |
5026 | 5085 | ||
5027 | BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem)); | ||
5028 | BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files)); | 5086 | BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files)); |
5029 | BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files)); | 5087 | BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files)); |
5030 | 5088 | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index f548f69c4299..b11756f9b6dc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event) | |||
1243 | PERF_EVENT_STATE_INACTIVE; | 1243 | PERF_EVENT_STATE_INACTIVE; |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | /* | 1246 | static void __perf_event_read_size(struct perf_event *event, int nr_siblings) |
1247 | * Called at perf_event creation and when events are attached/detached from a | ||
1248 | * group. | ||
1249 | */ | ||
1250 | static void perf_event__read_size(struct perf_event *event) | ||
1251 | { | 1247 | { |
1252 | int entry = sizeof(u64); /* value */ | 1248 | int entry = sizeof(u64); /* value */ |
1253 | int size = 0; | 1249 | int size = 0; |
@@ -1263,7 +1259,7 @@ static void perf_event__read_size(struct perf_event *event) | |||
1263 | entry += sizeof(u64); | 1259 | entry += sizeof(u64); |
1264 | 1260 | ||
1265 | if (event->attr.read_format & PERF_FORMAT_GROUP) { | 1261 | if (event->attr.read_format & PERF_FORMAT_GROUP) { |
1266 | nr += event->group_leader->nr_siblings; | 1262 | nr += nr_siblings; |
1267 | size += sizeof(u64); | 1263 | size += sizeof(u64); |
1268 | } | 1264 | } |
1269 | 1265 | ||
@@ -1271,14 +1267,11 @@ static void perf_event__read_size(struct perf_event *event) | |||
1271 | event->read_size = size; | 1267 | event->read_size = size; |
1272 | } | 1268 | } |
1273 | 1269 | ||
1274 | static void perf_event__header_size(struct perf_event *event) | 1270 | static void __perf_event_header_size(struct perf_event *event, u64 sample_type) |
1275 | { | 1271 | { |
1276 | struct perf_sample_data *data; | 1272 | struct perf_sample_data *data; |
1277 | u64 sample_type = event->attr.sample_type; | ||
1278 | u16 size = 0; | 1273 | u16 size = 0; |
1279 | 1274 | ||
1280 | perf_event__read_size(event); | ||
1281 | |||
1282 | if (sample_type & PERF_SAMPLE_IP) | 1275 | if (sample_type & PERF_SAMPLE_IP) |
1283 | size += sizeof(data->ip); | 1276 | size += sizeof(data->ip); |
1284 | 1277 | ||
@@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event) | |||
1303 | event->header_size = size; | 1296 | event->header_size = size; |
1304 | } | 1297 | } |
1305 | 1298 | ||
1299 | /* | ||
1300 | * Called at perf_event creation and when events are attached/detached from a | ||
1301 | * group. | ||
1302 | */ | ||
1303 | static void perf_event__header_size(struct perf_event *event) | ||
1304 | { | ||
1305 | __perf_event_read_size(event, | ||
1306 | event->group_leader->nr_siblings); | ||
1307 | __perf_event_header_size(event, event->attr.sample_type); | ||
1308 | } | ||
1309 | |||
1306 | static void perf_event__id_header_size(struct perf_event *event) | 1310 | static void perf_event__id_header_size(struct perf_event *event) |
1307 | { | 1311 | { |
1308 | struct perf_sample_data *data; | 1312 | struct perf_sample_data *data; |
@@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event) | |||
1330 | event->id_header_size = size; | 1334 | event->id_header_size = size; |
1331 | } | 1335 | } |
1332 | 1336 | ||
1337 | static bool perf_event_validate_size(struct perf_event *event) | ||
1338 | { | ||
1339 | /* | ||
1340 | * The values computed here will be over-written when we actually | ||
1341 | * attach the event. | ||
1342 | */ | ||
1343 | __perf_event_read_size(event, event->group_leader->nr_siblings + 1); | ||
1344 | __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); | ||
1345 | perf_event__id_header_size(event); | ||
1346 | |||
1347 | /* | ||
1348 | * Sum the lot; should not exceed the 64k limit we have on records. | ||
1349 | * Conservative limit to allow for callchains and other variable fields. | ||
1350 | */ | ||
1351 | if (event->read_size + event->header_size + | ||
1352 | event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) | ||
1353 | return false; | ||
1354 | |||
1355 | return true; | ||
1356 | } | ||
1357 | |||
1333 | static void perf_group_attach(struct perf_event *event) | 1358 | static void perf_group_attach(struct perf_event *event) |
1334 | { | 1359 | { |
1335 | struct perf_event *group_leader = event->group_leader, *pos; | 1360 | struct perf_event *group_leader = event->group_leader, *pos; |
@@ -8297,13 +8322,35 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8297 | 8322 | ||
8298 | if (move_group) { | 8323 | if (move_group) { |
8299 | gctx = group_leader->ctx; | 8324 | gctx = group_leader->ctx; |
8325 | mutex_lock_double(&gctx->mutex, &ctx->mutex); | ||
8326 | } else { | ||
8327 | mutex_lock(&ctx->mutex); | ||
8328 | } | ||
8300 | 8329 | ||
8330 | if (!perf_event_validate_size(event)) { | ||
8331 | err = -E2BIG; | ||
8332 | goto err_locked; | ||
8333 | } | ||
8334 | |||
8335 | /* | ||
8336 | * Must be under the same ctx::mutex as perf_install_in_context(), | ||
8337 | * because we need to serialize with concurrent event creation. | ||
8338 | */ | ||
8339 | if (!exclusive_event_installable(event, ctx)) { | ||
8340 | /* exclusive and group stuff are assumed mutually exclusive */ | ||
8341 | WARN_ON_ONCE(move_group); | ||
8342 | |||
8343 | err = -EBUSY; | ||
8344 | goto err_locked; | ||
8345 | } | ||
8346 | |||
8347 | WARN_ON_ONCE(ctx->parent_ctx); | ||
8348 | |||
8349 | if (move_group) { | ||
8301 | /* | 8350 | /* |
8302 | * See perf_event_ctx_lock() for comments on the details | 8351 | * See perf_event_ctx_lock() for comments on the details |
8303 | * of swizzling perf_event::ctx. | 8352 | * of swizzling perf_event::ctx. |
8304 | */ | 8353 | */ |
8305 | mutex_lock_double(&gctx->mutex, &ctx->mutex); | ||
8306 | |||
8307 | perf_remove_from_context(group_leader, false); | 8354 | perf_remove_from_context(group_leader, false); |
8308 | 8355 | ||
8309 | list_for_each_entry(sibling, &group_leader->sibling_list, | 8356 | list_for_each_entry(sibling, &group_leader->sibling_list, |
@@ -8311,13 +8358,7 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8311 | perf_remove_from_context(sibling, false); | 8358 | perf_remove_from_context(sibling, false); |
8312 | put_ctx(gctx); | 8359 | put_ctx(gctx); |
8313 | } | 8360 | } |
8314 | } else { | ||
8315 | mutex_lock(&ctx->mutex); | ||
8316 | } | ||
8317 | 8361 | ||
8318 | WARN_ON_ONCE(ctx->parent_ctx); | ||
8319 | |||
8320 | if (move_group) { | ||
8321 | /* | 8362 | /* |
8322 | * Wait for everybody to stop referencing the events through | 8363 | * Wait for everybody to stop referencing the events through |
8323 | * the old lists, before installing it on new lists. | 8364 | * the old lists, before installing it on new lists. |
@@ -8349,22 +8390,29 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8349 | perf_event__state_init(group_leader); | 8390 | perf_event__state_init(group_leader); |
8350 | perf_install_in_context(ctx, group_leader, group_leader->cpu); | 8391 | perf_install_in_context(ctx, group_leader, group_leader->cpu); |
8351 | get_ctx(ctx); | 8392 | get_ctx(ctx); |
8352 | } | ||
8353 | 8393 | ||
8354 | if (!exclusive_event_installable(event, ctx)) { | 8394 | /* |
8355 | err = -EBUSY; | 8395 | * Now that all events are installed in @ctx, nothing |
8356 | mutex_unlock(&ctx->mutex); | 8396 | * references @gctx anymore, so drop the last reference we have |
8357 | fput(event_file); | 8397 | * on it. |
8358 | goto err_context; | 8398 | */ |
8399 | put_ctx(gctx); | ||
8359 | } | 8400 | } |
8360 | 8401 | ||
8402 | /* | ||
8403 | * Precalculate sample_data sizes; do while holding ctx::mutex such | ||
8404 | * that we're serialized against further additions and before | ||
8405 | * perf_install_in_context() which is the point the event is active and | ||
8406 | * can use these values. | ||
8407 | */ | ||
8408 | perf_event__header_size(event); | ||
8409 | perf_event__id_header_size(event); | ||
8410 | |||
8361 | perf_install_in_context(ctx, event, event->cpu); | 8411 | perf_install_in_context(ctx, event, event->cpu); |
8362 | perf_unpin_context(ctx); | 8412 | perf_unpin_context(ctx); |
8363 | 8413 | ||
8364 | if (move_group) { | 8414 | if (move_group) |
8365 | mutex_unlock(&gctx->mutex); | 8415 | mutex_unlock(&gctx->mutex); |
8366 | put_ctx(gctx); | ||
8367 | } | ||
8368 | mutex_unlock(&ctx->mutex); | 8416 | mutex_unlock(&ctx->mutex); |
8369 | 8417 | ||
8370 | put_online_cpus(); | 8418 | put_online_cpus(); |
@@ -8376,12 +8424,6 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8376 | mutex_unlock(¤t->perf_event_mutex); | 8424 | mutex_unlock(¤t->perf_event_mutex); |
8377 | 8425 | ||
8378 | /* | 8426 | /* |
8379 | * Precalculate sample_data sizes | ||
8380 | */ | ||
8381 | perf_event__header_size(event); | ||
8382 | perf_event__id_header_size(event); | ||
8383 | |||
8384 | /* | ||
8385 | * Drop the reference on the group_event after placing the | 8427 | * Drop the reference on the group_event after placing the |
8386 | * new event on the sibling_list. This ensures destruction | 8428 | * new event on the sibling_list. This ensures destruction |
8387 | * of the group leader will find the pointer to itself in | 8429 | * of the group leader will find the pointer to itself in |
@@ -8391,6 +8433,12 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8391 | fd_install(event_fd, event_file); | 8433 | fd_install(event_fd, event_file); |
8392 | return event_fd; | 8434 | return event_fd; |
8393 | 8435 | ||
8436 | err_locked: | ||
8437 | if (move_group) | ||
8438 | mutex_unlock(&gctx->mutex); | ||
8439 | mutex_unlock(&ctx->mutex); | ||
8440 | /* err_file: */ | ||
8441 | fput(event_file); | ||
8394 | err_context: | 8442 | err_context: |
8395 | perf_unpin_context(ctx); | 8443 | perf_unpin_context(ctx); |
8396 | put_ctx(ctx); | 8444 | put_ctx(ctx); |
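For context on the new perf_event_validate_size() check above: a PERF_FORMAT_GROUP read costs one u64 per selected field plus one {value[,id]} entry per group member, so attaching one more sibling can push a record past the limit. The sketch below is a hedged, userspace-side restatement of that arithmetic (hypothetical helper, not part of the patch); the kernel runs the same sums with nr_siblings + 1 and fails the perf_event_open() with -E2BIG before the group is touched.

#include <stdio.h>
#include <linux/perf_event.h>

/* Hedged sketch mirroring __perf_event_read_size(): one u64 per selected
 * read_format field, plus one {value[,id]} entry per group member. */
static size_t perf_read_size(unsigned long long read_format, int nr_members)
{
        size_t entry = sizeof(unsigned long long);      /* value */
        size_t size  = 0;
        int nr = 1;                                     /* the event itself */

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                size += sizeof(unsigned long long);
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                size += sizeof(unsigned long long);
        if (read_format & PERF_FORMAT_ID)
                entry += sizeof(unsigned long long);
        if (read_format & PERF_FORMAT_GROUP) {
                nr = nr_members;                        /* leader + siblings */
                size += sizeof(unsigned long long);     /* the 'nr' field */
        }
        return size + nr * entry;
}

int main(void)
{
        /* e.g. a leader plus three siblings, read as a group with ids */
        printf("%zu bytes per read\n",
               perf_read_size(PERF_FORMAT_GROUP | PERF_FORMAT_ID, 4));
        return 0;
}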
diff --git a/kernel/fork.c b/kernel/fork.c index 7d5f0f118a63..2845623fb582 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1149,6 +1149,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
1149 | tty_audit_fork(sig); | 1149 | tty_audit_fork(sig); |
1150 | sched_autogroup_fork(sig); | 1150 | sched_autogroup_fork(sig); |
1151 | 1151 | ||
1152 | #ifdef CONFIG_CGROUPS | ||
1153 | init_rwsem(&sig->group_rwsem); | ||
1154 | #endif | ||
1155 | |||
1152 | sig->oom_score_adj = current->signal->oom_score_adj; | 1156 | sig->oom_score_adj = current->signal->oom_score_adj; |
1153 | sig->oom_score_adj_min = current->signal->oom_score_adj_min; | 1157 | sig->oom_score_adj_min = current->signal->oom_score_adj_min; |
1154 | 1158 | ||
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 6e40a9539763..e28169dd1c36 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -83,7 +83,7 @@ int irq_set_handler_data(unsigned int irq, void *data) | |||
83 | 83 | ||
84 | if (!desc) | 84 | if (!desc) |
85 | return -EINVAL; | 85 | return -EINVAL; |
86 | desc->irq_data.handler_data = data; | 86 | desc->irq_common_data.handler_data = data; |
87 | irq_put_desc_unlock(desc, flags); | 87 | irq_put_desc_unlock(desc, flags); |
88 | return 0; | 88 | return 0; |
89 | } | 89 | } |
@@ -105,7 +105,7 @@ int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, | |||
105 | 105 | ||
106 | if (!desc) | 106 | if (!desc) |
107 | return -EINVAL; | 107 | return -EINVAL; |
108 | desc->irq_data.msi_desc = entry; | 108 | desc->irq_common_data.msi_desc = entry; |
109 | if (entry && !irq_offset) | 109 | if (entry && !irq_offset) |
110 | entry->irq = irq_base; | 110 | entry->irq = irq_base; |
111 | irq_put_desc_unlock(desc, flags); | 111 | irq_put_desc_unlock(desc, flags); |
@@ -372,7 +372,6 @@ static bool irq_may_run(struct irq_desc *desc) | |||
372 | 372 | ||
373 | /** | 373 | /** |
374 | * handle_simple_irq - Simple and software-decoded IRQs. | 374 | * handle_simple_irq - Simple and software-decoded IRQs. |
375 | * @irq: the interrupt number | ||
376 | * @desc: the interrupt description structure for this irq | 375 | * @desc: the interrupt description structure for this irq |
377 | * | 376 | * |
378 | * Simple interrupts are either sent from a demultiplexing interrupt | 377 | * Simple interrupts are either sent from a demultiplexing interrupt |
@@ -382,8 +381,7 @@ static bool irq_may_run(struct irq_desc *desc) | |||
382 | * Note: The caller is expected to handle the ack, clear, mask and | 381 | * Note: The caller is expected to handle the ack, clear, mask and |
383 | * unmask issues if necessary. | 382 | * unmask issues if necessary. |
384 | */ | 383 | */ |
385 | void | 384 | void handle_simple_irq(struct irq_desc *desc) |
386 | handle_simple_irq(unsigned int irq, struct irq_desc *desc) | ||
387 | { | 385 | { |
388 | raw_spin_lock(&desc->lock); | 386 | raw_spin_lock(&desc->lock); |
389 | 387 | ||
@@ -425,7 +423,6 @@ static void cond_unmask_irq(struct irq_desc *desc) | |||
425 | 423 | ||
426 | /** | 424 | /** |
427 | * handle_level_irq - Level type irq handler | 425 | * handle_level_irq - Level type irq handler |
428 | * @irq: the interrupt number | ||
429 | * @desc: the interrupt description structure for this irq | 426 | * @desc: the interrupt description structure for this irq |
430 | * | 427 | * |
431 | * Level type interrupts are active as long as the hardware line has | 428 | * Level type interrupts are active as long as the hardware line has |
@@ -433,8 +430,7 @@ static void cond_unmask_irq(struct irq_desc *desc) | |||
433 | * it after the associated handler has acknowledged the device, so the | 430 | * it after the associated handler has acknowledged the device, so the |
434 | * interrupt line is back to inactive. | 431 | * interrupt line is back to inactive. |
435 | */ | 432 | */ |
436 | void | 433 | void handle_level_irq(struct irq_desc *desc) |
437 | handle_level_irq(unsigned int irq, struct irq_desc *desc) | ||
438 | { | 434 | { |
439 | raw_spin_lock(&desc->lock); | 435 | raw_spin_lock(&desc->lock); |
440 | mask_ack_irq(desc); | 436 | mask_ack_irq(desc); |
@@ -496,7 +492,6 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) | |||
496 | 492 | ||
497 | /** | 493 | /** |
498 | * handle_fasteoi_irq - irq handler for transparent controllers | 494 | * handle_fasteoi_irq - irq handler for transparent controllers |
499 | * @irq: the interrupt number | ||
500 | * @desc: the interrupt description structure for this irq | 495 | * @desc: the interrupt description structure for this irq |
501 | * | 496 | * |
502 | * Only a single callback will be issued to the chip: an ->eoi() | 497 | * Only a single callback will be issued to the chip: an ->eoi() |
@@ -504,8 +499,7 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) | |||
504 | * for modern forms of interrupt handlers, which handle the flow | 499 | * for modern forms of interrupt handlers, which handle the flow |
505 | * details in hardware, transparently. | 500 | * details in hardware, transparently. |
506 | */ | 501 | */ |
507 | void | 502 | void handle_fasteoi_irq(struct irq_desc *desc) |
508 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | ||
509 | { | 503 | { |
510 | struct irq_chip *chip = desc->irq_data.chip; | 504 | struct irq_chip *chip = desc->irq_data.chip; |
511 | 505 | ||
@@ -546,7 +540,6 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_irq); | |||
546 | 540 | ||
547 | /** | 541 | /** |
548 | * handle_edge_irq - edge type IRQ handler | 542 | * handle_edge_irq - edge type IRQ handler |
549 | * @irq: the interrupt number | ||
550 | * @desc: the interrupt description structure for this irq | 543 | * @desc: the interrupt description structure for this irq |
551 | * | 544 | * |
552 | * Interrupt occurs on the falling and/or rising edge of a hardware | 545 | * Interrupt occurs on the falling and/or rising edge of a hardware |
@@ -560,8 +553,7 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_irq); | |||
560 | * the handler was running. If all pending interrupts are handled, the | 553 | * the handler was running. If all pending interrupts are handled, the |
561 | * loop is left. | 554 | * loop is left. |
562 | */ | 555 | */ |
563 | void | 556 | void handle_edge_irq(struct irq_desc *desc) |
564 | handle_edge_irq(unsigned int irq, struct irq_desc *desc) | ||
565 | { | 557 | { |
566 | raw_spin_lock(&desc->lock); | 558 | raw_spin_lock(&desc->lock); |
567 | 559 | ||
@@ -618,13 +610,12 @@ EXPORT_SYMBOL(handle_edge_irq); | |||
618 | #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER | 610 | #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER |
619 | /** | 611 | /** |
620 | * handle_edge_eoi_irq - edge eoi type IRQ handler | 612 | * handle_edge_eoi_irq - edge eoi type IRQ handler |
621 | * @irq: the interrupt number | ||
622 | * @desc: the interrupt description structure for this irq | 613 | * @desc: the interrupt description structure for this irq |
623 | * | 614 | * |
624 | * Similar to the above handle_edge_irq, but using eoi and w/o the | 615 | * Similar to the above handle_edge_irq, but using eoi and w/o the |
625 | * mask/unmask logic. | 616 | * mask/unmask logic. |
626 | */ | 617 | */ |
627 | void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) | 618 | void handle_edge_eoi_irq(struct irq_desc *desc) |
628 | { | 619 | { |
629 | struct irq_chip *chip = irq_desc_get_chip(desc); | 620 | struct irq_chip *chip = irq_desc_get_chip(desc); |
630 | 621 | ||
@@ -665,13 +656,11 @@ out_eoi: | |||
665 | 656 | ||
666 | /** | 657 | /** |
667 | * handle_percpu_irq - Per CPU local irq handler | 658 | * handle_percpu_irq - Per CPU local irq handler |
668 | * @irq: the interrupt number | ||
669 | * @desc: the interrupt description structure for this irq | 659 | * @desc: the interrupt description structure for this irq |
670 | * | 660 | * |
671 | * Per CPU interrupts on SMP machines without locking requirements | 661 | * Per CPU interrupts on SMP machines without locking requirements |
672 | */ | 662 | */ |
673 | void | 663 | void handle_percpu_irq(struct irq_desc *desc) |
674 | handle_percpu_irq(unsigned int irq, struct irq_desc *desc) | ||
675 | { | 664 | { |
676 | struct irq_chip *chip = irq_desc_get_chip(desc); | 665 | struct irq_chip *chip = irq_desc_get_chip(desc); |
677 | 666 | ||
@@ -688,7 +677,6 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) | |||
688 | 677 | ||
689 | /** | 678 | /** |
690 | * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids | 679 | * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids |
691 | * @irq: the interrupt number | ||
692 | * @desc: the interrupt description structure for this irq | 680 | * @desc: the interrupt description structure for this irq |
693 | * | 681 | * |
694 | * Per CPU interrupts on SMP machines without locking requirements. Same as | 682 | * Per CPU interrupts on SMP machines without locking requirements. Same as |
@@ -698,11 +686,12 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) | |||
698 | * contain the real device id for the cpu on which this handler is | 686 | * contain the real device id for the cpu on which this handler is |
699 | * called | 687 | * called |
700 | */ | 688 | */ |
701 | void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) | 689 | void handle_percpu_devid_irq(struct irq_desc *desc) |
702 | { | 690 | { |
703 | struct irq_chip *chip = irq_desc_get_chip(desc); | 691 | struct irq_chip *chip = irq_desc_get_chip(desc); |
704 | struct irqaction *action = desc->action; | 692 | struct irqaction *action = desc->action; |
705 | void *dev_id = raw_cpu_ptr(action->percpu_dev_id); | 693 | void *dev_id = raw_cpu_ptr(action->percpu_dev_id); |
694 | unsigned int irq = irq_desc_get_irq(desc); | ||
706 | irqreturn_t res; | 695 | irqreturn_t res; |
707 | 696 | ||
708 | kstat_incr_irqs_this_cpu(desc); | 697 | kstat_incr_irqs_this_cpu(desc); |
@@ -796,7 +785,7 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, | |||
796 | return; | 785 | return; |
797 | 786 | ||
798 | __irq_do_set_handler(desc, handle, 1, NULL); | 787 | __irq_do_set_handler(desc, handle, 1, NULL); |
799 | desc->irq_data.handler_data = data; | 788 | desc->irq_common_data.handler_data = data; |
800 | 789 | ||
801 | irq_put_desc_busunlock(desc, flags); | 790 | irq_put_desc_busunlock(desc, flags); |
802 | } | 791 | } |
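All of the chip.c flow handlers above lose the redundant 'irq' argument; any handler installed through irq_set_chained_handler() or __irq_do_set_handler() has to move to the one-argument prototype as well. A hedged sketch of a hypothetical driver handler after the conversion (names invented), recovering the number on demand just as handle_percpu_devid_irq() now does:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/printk.h>

/* Was: static void foo_demux_handler(unsigned int irq, struct irq_desc *desc) */
static void foo_demux_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned int parent_irq = irq_desc_get_irq(desc);

        chained_irq_enter(chip, desc);
        pr_debug("demultiplexing parent irq %u\n", parent_irq);
        /* ... look up the pending child interrupt and hand it to
         *     generic_handle_irq() ... */
        chained_irq_exit(chip, desc);
}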
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index b6eeea8a80c5..e25a83b67cce 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
@@ -22,17 +22,19 @@ | |||
22 | 22 | ||
23 | /** | 23 | /** |
24 | * handle_bad_irq - handle spurious and unhandled irqs | 24 | * handle_bad_irq - handle spurious and unhandled irqs |
25 | * @irq: the interrupt number | ||
26 | * @desc: description of the interrupt | 25 | * @desc: description of the interrupt |
27 | * | 26 | * |
28 | * Handles spurious and unhandled IRQs. It also prints a debug message. | 27 | * Handles spurious and unhandled IRQs. It also prints a debug message. |
29 | */ | 28 | */ |
30 | void handle_bad_irq(unsigned int irq, struct irq_desc *desc) | 29 | void handle_bad_irq(struct irq_desc *desc) |
31 | { | 30 | { |
31 | unsigned int irq = irq_desc_get_irq(desc); | ||
32 | |||
32 | print_irq_desc(irq, desc); | 33 | print_irq_desc(irq, desc); |
33 | kstat_incr_irqs_this_cpu(desc); | 34 | kstat_incr_irqs_this_cpu(desc); |
34 | ack_bad_irq(irq); | 35 | ack_bad_irq(irq); |
35 | } | 36 | } |
37 | EXPORT_SYMBOL_GPL(handle_bad_irq); | ||
36 | 38 | ||
37 | /* | 39 | /* |
38 | * Special, empty irq handler: | 40 | * Special, empty irq handler: |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index eee4b385cffb..5ef0c2dbe930 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -194,7 +194,7 @@ static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc) | |||
194 | 194 | ||
195 | static inline int irq_desc_get_node(struct irq_desc *desc) | 195 | static inline int irq_desc_get_node(struct irq_desc *desc) |
196 | { | 196 | { |
197 | return irq_data_get_node(&desc->irq_data); | 197 | return irq_common_data_get_node(&desc->irq_common_data); |
198 | } | 198 | } |
199 | 199 | ||
200 | #ifdef CONFIG_PM_SLEEP | 200 | #ifdef CONFIG_PM_SLEEP |
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 0a2a4b697bcb..239e2ae2c947 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
@@ -38,12 +38,13 @@ static void __init init_irq_default_affinity(void) | |||
38 | #ifdef CONFIG_SMP | 38 | #ifdef CONFIG_SMP |
39 | static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) | 39 | static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) |
40 | { | 40 | { |
41 | if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) | 41 | if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, |
42 | gfp, node)) | ||
42 | return -ENOMEM; | 43 | return -ENOMEM; |
43 | 44 | ||
44 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 45 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
45 | if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { | 46 | if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { |
46 | free_cpumask_var(desc->irq_data.affinity); | 47 | free_cpumask_var(desc->irq_common_data.affinity); |
47 | return -ENOMEM; | 48 | return -ENOMEM; |
48 | } | 49 | } |
49 | #endif | 50 | #endif |
@@ -52,11 +53,13 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) | |||
52 | 53 | ||
53 | static void desc_smp_init(struct irq_desc *desc, int node) | 54 | static void desc_smp_init(struct irq_desc *desc, int node) |
54 | { | 55 | { |
55 | desc->irq_data.node = node; | 56 | cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity); |
56 | cpumask_copy(desc->irq_data.affinity, irq_default_affinity); | ||
57 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 57 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
58 | cpumask_clear(desc->pending_mask); | 58 | cpumask_clear(desc->pending_mask); |
59 | #endif | 59 | #endif |
60 | #ifdef CONFIG_NUMA | ||
61 | desc->irq_common_data.node = node; | ||
62 | #endif | ||
60 | } | 63 | } |
61 | 64 | ||
62 | #else | 65 | #else |
@@ -70,12 +73,13 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, | |||
70 | { | 73 | { |
71 | int cpu; | 74 | int cpu; |
72 | 75 | ||
76 | desc->irq_common_data.handler_data = NULL; | ||
77 | desc->irq_common_data.msi_desc = NULL; | ||
78 | |||
73 | desc->irq_data.common = &desc->irq_common_data; | 79 | desc->irq_data.common = &desc->irq_common_data; |
74 | desc->irq_data.irq = irq; | 80 | desc->irq_data.irq = irq; |
75 | desc->irq_data.chip = &no_irq_chip; | 81 | desc->irq_data.chip = &no_irq_chip; |
76 | desc->irq_data.chip_data = NULL; | 82 | desc->irq_data.chip_data = NULL; |
77 | desc->irq_data.handler_data = NULL; | ||
78 | desc->irq_data.msi_desc = NULL; | ||
79 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); | 83 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); |
80 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); | 84 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); |
81 | desc->handle_irq = handle_bad_irq; | 85 | desc->handle_irq = handle_bad_irq; |
@@ -121,7 +125,7 @@ static void free_masks(struct irq_desc *desc) | |||
121 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 125 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
122 | free_cpumask_var(desc->pending_mask); | 126 | free_cpumask_var(desc->pending_mask); |
123 | #endif | 127 | #endif |
124 | free_cpumask_var(desc->irq_data.affinity); | 128 | free_cpumask_var(desc->irq_common_data.affinity); |
125 | } | 129 | } |
126 | #else | 130 | #else |
127 | static inline void free_masks(struct irq_desc *desc) { } | 131 | static inline void free_masks(struct irq_desc *desc) { } |
@@ -343,7 +347,7 @@ int generic_handle_irq(unsigned int irq) | |||
343 | 347 | ||
344 | if (!desc) | 348 | if (!desc) |
345 | return -EINVAL; | 349 | return -EINVAL; |
346 | generic_handle_irq_desc(irq, desc); | 350 | generic_handle_irq_desc(desc); |
347 | return 0; | 351 | return 0; |
348 | } | 352 | } |
349 | EXPORT_SYMBOL_GPL(generic_handle_irq); | 353 | EXPORT_SYMBOL_GPL(generic_handle_irq); |
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 79baaf8a7813..dc9d27c0c158 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
@@ -844,7 +844,6 @@ static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain, | |||
844 | child->parent_data = irq_data; | 844 | child->parent_data = irq_data; |
845 | irq_data->irq = child->irq; | 845 | irq_data->irq = child->irq; |
846 | irq_data->common = child->common; | 846 | irq_data->common = child->common; |
847 | irq_data->node = child->node; | ||
848 | irq_data->domain = domain; | 847 | irq_data->domain = domain; |
849 | } | 848 | } |
850 | 849 | ||
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index ad1b064f94fe..f9a59f6cabd2 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -192,7 +192,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
192 | switch (ret) { | 192 | switch (ret) { |
193 | case IRQ_SET_MASK_OK: | 193 | case IRQ_SET_MASK_OK: |
194 | case IRQ_SET_MASK_OK_DONE: | 194 | case IRQ_SET_MASK_OK_DONE: |
195 | cpumask_copy(data->affinity, mask); | 195 | cpumask_copy(desc->irq_common_data.affinity, mask); |
196 | case IRQ_SET_MASK_OK_NOCOPY: | 196 | case IRQ_SET_MASK_OK_NOCOPY: |
197 | irq_set_thread_affinity(desc); | 197 | irq_set_thread_affinity(desc); |
198 | ret = 0; | 198 | ret = 0; |
@@ -304,7 +304,7 @@ static void irq_affinity_notify(struct work_struct *work) | |||
304 | if (irq_move_pending(&desc->irq_data)) | 304 | if (irq_move_pending(&desc->irq_data)) |
305 | irq_get_pending(cpumask, desc); | 305 | irq_get_pending(cpumask, desc); |
306 | else | 306 | else |
307 | cpumask_copy(cpumask, desc->irq_data.affinity); | 307 | cpumask_copy(cpumask, desc->irq_common_data.affinity); |
308 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 308 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
309 | 309 | ||
310 | notify->notify(notify, cpumask); | 310 | notify->notify(notify, cpumask); |
@@ -375,9 +375,9 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask) | |||
375 | * one of the targets is online. | 375 | * one of the targets is online. |
376 | */ | 376 | */ |
377 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { | 377 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { |
378 | if (cpumask_intersects(desc->irq_data.affinity, | 378 | if (cpumask_intersects(desc->irq_common_data.affinity, |
379 | cpu_online_mask)) | 379 | cpu_online_mask)) |
380 | set = desc->irq_data.affinity; | 380 | set = desc->irq_common_data.affinity; |
381 | else | 381 | else |
382 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); | 382 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); |
383 | } | 383 | } |
@@ -829,8 +829,8 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | |||
829 | * This code is triggered unconditionally. Check the affinity | 829 | * This code is triggered unconditionally. Check the affinity |
830 | * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. | 830 | * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. |
831 | */ | 831 | */ |
832 | if (desc->irq_data.affinity) | 832 | if (desc->irq_common_data.affinity) |
833 | cpumask_copy(mask, desc->irq_data.affinity); | 833 | cpumask_copy(mask, desc->irq_common_data.affinity); |
834 | else | 834 | else |
835 | valid = false; | 835 | valid = false; |
836 | raw_spin_unlock_irq(&desc->lock); | 836 | raw_spin_unlock_irq(&desc->lock); |
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 7e6512b9dc1f..be9149f62eb8 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c | |||
@@ -228,11 +228,7 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info) | |||
228 | { | 228 | { |
229 | struct irq_chip *chip = info->chip; | 229 | struct irq_chip *chip = info->chip; |
230 | 230 | ||
231 | BUG_ON(!chip); | 231 | BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask); |
232 | if (!chip->irq_mask) | ||
233 | chip->irq_mask = pci_msi_mask_irq; | ||
234 | if (!chip->irq_unmask) | ||
235 | chip->irq_unmask = pci_msi_unmask_irq; | ||
236 | if (!chip->irq_set_affinity) | 232 | if (!chip->irq_set_affinity) |
237 | chip->irq_set_affinity = msi_domain_set_affinity; | 233 | chip->irq_set_affinity = msi_domain_set_affinity; |
238 | } | 234 | } |
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 0e97c142ce40..a50ddc9417ff 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | #include <linux/mutex.h> | ||
15 | 16 | ||
16 | #include "internals.h" | 17 | #include "internals.h" |
17 | 18 | ||
@@ -39,7 +40,7 @@ static struct proc_dir_entry *root_irq_dir; | |||
39 | static int show_irq_affinity(int type, struct seq_file *m, void *v) | 40 | static int show_irq_affinity(int type, struct seq_file *m, void *v) |
40 | { | 41 | { |
41 | struct irq_desc *desc = irq_to_desc((long)m->private); | 42 | struct irq_desc *desc = irq_to_desc((long)m->private); |
42 | const struct cpumask *mask = desc->irq_data.affinity; | 43 | const struct cpumask *mask = desc->irq_common_data.affinity; |
43 | 44 | ||
44 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 45 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
45 | if (irqd_is_setaffinity_pending(&desc->irq_data)) | 46 | if (irqd_is_setaffinity_pending(&desc->irq_data)) |
@@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) | |||
323 | 324 | ||
324 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) | 325 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) |
325 | { | 326 | { |
327 | static DEFINE_MUTEX(register_lock); | ||
326 | char name [MAX_NAMELEN]; | 328 | char name [MAX_NAMELEN]; |
327 | 329 | ||
328 | if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) | 330 | if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip)) |
329 | return; | 331 | return; |
330 | 332 | ||
333 | /* | ||
334 | * irq directories are registered only when a handler is | ||
335 | * added, not when the descriptor is created, so multiple | ||
336 | * tasks might try to register at the same time. | ||
337 | */ | ||
338 | mutex_lock(®ister_lock); | ||
339 | |||
340 | if (desc->dir) | ||
341 | goto out_unlock; | ||
342 | |||
331 | memset(name, 0, MAX_NAMELEN); | 343 | memset(name, 0, MAX_NAMELEN); |
332 | sprintf(name, "%d", irq); | 344 | sprintf(name, "%d", irq); |
333 | 345 | ||
334 | /* create /proc/irq/1234 */ | 346 | /* create /proc/irq/1234 */ |
335 | desc->dir = proc_mkdir(name, root_irq_dir); | 347 | desc->dir = proc_mkdir(name, root_irq_dir); |
336 | if (!desc->dir) | 348 | if (!desc->dir) |
337 | return; | 349 | goto out_unlock; |
338 | 350 | ||
339 | #ifdef CONFIG_SMP | 351 | #ifdef CONFIG_SMP |
340 | /* create /proc/irq/<irq>/smp_affinity */ | 352 | /* create /proc/irq/<irq>/smp_affinity */ |
@@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
355 | 367 | ||
356 | proc_create_data("spurious", 0444, desc->dir, | 368 | proc_create_data("spurious", 0444, desc->dir, |
357 | &irq_spurious_proc_fops, (void *)(long)irq); | 369 | &irq_spurious_proc_fops, (void *)(long)irq); |
370 | |||
371 | out_unlock: | ||
372 | mutex_unlock(®ister_lock); | ||
358 | } | 373 | } |
359 | 374 | ||
360 | void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) | 375 | void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) |
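The register_irq_proc() fix above is the usual "create exactly once" pattern: creation happens lazily (on first handler registration, not at descriptor allocation), so concurrent requesters must re-check the existence test under a local mutex. A hedged, generic sketch of the same shape (struct and names invented, not kernel code):

#include <linux/mutex.h>
#include <linux/proc_fs.h>

struct foo_obj {
        const char *name;
        struct proc_dir_entry *dir;
};

static DEFINE_MUTEX(foo_dir_lock);

/* Lazily create f->dir once, no matter how many callers race here. */
static void foo_register_dir(struct foo_obj *f)
{
        mutex_lock(&foo_dir_lock);

        if (f->dir)                     /* somebody else already won */
                goto out_unlock;

        f->dir = proc_mkdir(f->name, NULL);
        if (!f->dir)
                goto out_unlock;

        /* ... populate entries below f->dir ... */

out_unlock:
        mutex_unlock(&foo_dir_lock);
}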
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index dd95f44f99b2..b86886beee4f 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
@@ -38,7 +38,7 @@ static void resend_irqs(unsigned long arg) | |||
38 | clear_bit(irq, irqs_resend); | 38 | clear_bit(irq, irqs_resend); |
39 | desc = irq_to_desc(irq); | 39 | desc = irq_to_desc(irq); |
40 | local_irq_disable(); | 40 | local_irq_disable(); |
41 | desc->handle_irq(irq, desc); | 41 | desc->handle_irq(desc); |
42 | local_irq_enable(); | 42 | local_irq_enable(); |
43 | } | 43 | } |
44 | } | 44 | } |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 8acfbf773e06..4e49cc4c9952 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -3068,7 +3068,7 @@ static int __lock_is_held(struct lockdep_map *lock); | |||
3068 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 3068 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
3069 | int trylock, int read, int check, int hardirqs_off, | 3069 | int trylock, int read, int check, int hardirqs_off, |
3070 | struct lockdep_map *nest_lock, unsigned long ip, | 3070 | struct lockdep_map *nest_lock, unsigned long ip, |
3071 | int references) | 3071 | int references, int pin_count) |
3072 | { | 3072 | { |
3073 | struct task_struct *curr = current; | 3073 | struct task_struct *curr = current; |
3074 | struct lock_class *class = NULL; | 3074 | struct lock_class *class = NULL; |
@@ -3157,7 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3157 | hlock->waittime_stamp = 0; | 3157 | hlock->waittime_stamp = 0; |
3158 | hlock->holdtime_stamp = lockstat_clock(); | 3158 | hlock->holdtime_stamp = lockstat_clock(); |
3159 | #endif | 3159 | #endif |
3160 | hlock->pin_count = 0; | 3160 | hlock->pin_count = pin_count; |
3161 | 3161 | ||
3162 | if (check && !mark_irqflags(curr, hlock)) | 3162 | if (check && !mark_irqflags(curr, hlock)) |
3163 | return 0; | 3163 | return 0; |
@@ -3343,7 +3343,7 @@ found_it: | |||
3343 | hlock_class(hlock)->subclass, hlock->trylock, | 3343 | hlock_class(hlock)->subclass, hlock->trylock, |
3344 | hlock->read, hlock->check, hlock->hardirqs_off, | 3344 | hlock->read, hlock->check, hlock->hardirqs_off, |
3345 | hlock->nest_lock, hlock->acquire_ip, | 3345 | hlock->nest_lock, hlock->acquire_ip, |
3346 | hlock->references)) | 3346 | hlock->references, hlock->pin_count)) |
3347 | return 0; | 3347 | return 0; |
3348 | } | 3348 | } |
3349 | 3349 | ||
@@ -3433,7 +3433,7 @@ found_it: | |||
3433 | hlock_class(hlock)->subclass, hlock->trylock, | 3433 | hlock_class(hlock)->subclass, hlock->trylock, |
3434 | hlock->read, hlock->check, hlock->hardirqs_off, | 3434 | hlock->read, hlock->check, hlock->hardirqs_off, |
3435 | hlock->nest_lock, hlock->acquire_ip, | 3435 | hlock->nest_lock, hlock->acquire_ip, |
3436 | hlock->references)) | 3436 | hlock->references, hlock->pin_count)) |
3437 | return 0; | 3437 | return 0; |
3438 | } | 3438 | } |
3439 | 3439 | ||
@@ -3583,7 +3583,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3583 | current->lockdep_recursion = 1; | 3583 | current->lockdep_recursion = 1; |
3584 | trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); | 3584 | trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); |
3585 | __lock_acquire(lock, subclass, trylock, read, check, | 3585 | __lock_acquire(lock, subclass, trylock, read, check, |
3586 | irqs_disabled_flags(flags), nest_lock, ip, 0); | 3586 | irqs_disabled_flags(flags), nest_lock, ip, 0, 0); |
3587 | current->lockdep_recursion = 0; | 3587 | current->lockdep_recursion = 0; |
3588 | raw_local_irq_restore(flags); | 3588 | raw_local_irq_restore(flags); |
3589 | } | 3589 | } |
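The lockdep hunks above thread pin_count through the release/re-acquire paths so that a pinned lock stays pinned across lock_release()+lock_acquire() pairs. For reference, a hedged sketch of how pinning is used by a caller (invented subsystem; the same pairing appears in the migrate_tasks() hunk further down):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct foo_queue {
        raw_spinlock_t lock;
        /* ... */
};

static void foo_critical_section(struct foo_queue *q)
{
        raw_spin_lock(&q->lock);
        lockdep_pin_lock(&q->lock);     /* warn if anything drops it below */

        /* ... code that relies on q->lock being held throughout ... */

        lockdep_unpin_lock(&q->lock);
        raw_spin_unlock(&q->lock);
}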
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 337c8818541d..87e9ce6a63c5 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c | |||
@@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) | |||
289 | if (pv_enabled()) | 289 | if (pv_enabled()) |
290 | goto queue; | 290 | goto queue; |
291 | 291 | ||
292 | if (virt_queued_spin_lock(lock)) | 292 | if (virt_spin_lock(lock)) |
293 | return; | 293 | return; |
294 | 294 | ||
295 | /* | 295 | /* |
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9f75f25cc5d9..775d36cc0050 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -3868,6 +3868,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) | |||
3868 | static void __init | 3868 | static void __init |
3869 | rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | 3869 | rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) |
3870 | { | 3870 | { |
3871 | static struct lock_class_key rcu_exp_sched_rdp_class; | ||
3871 | unsigned long flags; | 3872 | unsigned long flags; |
3872 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); | 3873 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
3873 | struct rcu_node *rnp = rcu_get_root(rsp); | 3874 | struct rcu_node *rnp = rcu_get_root(rsp); |
@@ -3883,6 +3884,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
3883 | mutex_init(&rdp->exp_funnel_mutex); | 3884 | mutex_init(&rdp->exp_funnel_mutex); |
3884 | rcu_boot_init_nocb_percpu_data(rdp); | 3885 | rcu_boot_init_nocb_percpu_data(rdp); |
3885 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 3886 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
3887 | if (rsp == &rcu_sched_state) | ||
3888 | lockdep_set_class_and_name(&rdp->exp_funnel_mutex, | ||
3889 | &rcu_exp_sched_rdp_class, | ||
3890 | "rcu_data_exp_sched"); | ||
3886 | } | 3891 | } |
3887 | 3892 | ||
3888 | /* | 3893 | /* |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3595403921bd..10a8faa1b0d4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -621,18 +621,21 @@ int get_nohz_timer_target(void) | |||
621 | int i, cpu = smp_processor_id(); | 621 | int i, cpu = smp_processor_id(); |
622 | struct sched_domain *sd; | 622 | struct sched_domain *sd; |
623 | 623 | ||
624 | if (!idle_cpu(cpu)) | 624 | if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu)) |
625 | return cpu; | 625 | return cpu; |
626 | 626 | ||
627 | rcu_read_lock(); | 627 | rcu_read_lock(); |
628 | for_each_domain(cpu, sd) { | 628 | for_each_domain(cpu, sd) { |
629 | for_each_cpu(i, sched_domain_span(sd)) { | 629 | for_each_cpu(i, sched_domain_span(sd)) { |
630 | if (!idle_cpu(i)) { | 630 | if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) { |
631 | cpu = i; | 631 | cpu = i; |
632 | goto unlock; | 632 | goto unlock; |
633 | } | 633 | } |
634 | } | 634 | } |
635 | } | 635 | } |
636 | |||
637 | if (!is_housekeeping_cpu(cpu)) | ||
638 | cpu = housekeeping_any_cpu(); | ||
636 | unlock: | 639 | unlock: |
637 | rcu_read_unlock(); | 640 | rcu_read_unlock(); |
638 | return cpu; | 641 | return cpu; |
@@ -2514,11 +2517,11 @@ static struct rq *finish_task_switch(struct task_struct *prev) | |||
2514 | * If a task dies, then it sets TASK_DEAD in tsk->state and calls | 2517 | * If a task dies, then it sets TASK_DEAD in tsk->state and calls |
2515 | * schedule one last time. The schedule call will never return, and | 2518 | * schedule one last time. The schedule call will never return, and |
2516 | * the scheduled task must drop that reference. | 2519 | * the scheduled task must drop that reference. |
2517 | * The test for TASK_DEAD must occur while the runqueue locks are | 2520 | * |
2518 | * still held, otherwise prev could be scheduled on another cpu, die | 2521 | * We must observe prev->state before clearing prev->on_cpu (in |
2519 | * there before we look at prev->state, and then the reference would | 2522 | * finish_lock_switch), otherwise a concurrent wakeup can get prev |
2520 | be dropped twice. | 2523 | * running on another CPU and we could race with its RUNNING -> DEAD |
2521 | * Manfred Spraul <manfred@colorfullife.com> | 2524 | * transition, resulting in a double drop. |
2522 | */ | 2525 | */ |
2523 | prev_state = prev->state; | 2526 | prev_state = prev->state; |
2524 | vtime_task_switch(prev); | 2527 | vtime_task_switch(prev); |
@@ -2666,13 +2669,20 @@ unsigned long nr_running(void) | |||
2666 | 2669 | ||
2667 | /* | 2670 | /* |
2668 | * Check if only the current task is running on the cpu. | 2671 | * Check if only the current task is running on the cpu. |
2672 | * | ||
2673 | * Caution: this function does not check that the caller has disabled | ||
2674 | * preemption, thus the result might have a time-of-check-to-time-of-use | ||
2675 | * race. The caller is responsible to use it correctly, for example: | ||
2676 | * | ||
2677 | * - from a non-preemptable section (of course) | ||
2678 | * | ||
2679 | * - from a thread that is bound to a single CPU | ||
2680 | * | ||
2681 | * - in a loop with very short iterations (e.g. a polling loop) | ||
2669 | */ | 2682 | */ |
2670 | bool single_task_running(void) | 2683 | bool single_task_running(void) |
2671 | { | 2684 | { |
2672 | if (cpu_rq(smp_processor_id())->nr_running == 1) | 2685 | return raw_rq()->nr_running == 1; |
2673 | return true; | ||
2674 | else | ||
2675 | return false; | ||
2676 | } | 2686 | } |
2677 | EXPORT_SYMBOL(single_task_running); | 2687 | EXPORT_SYMBOL(single_task_running); |
2678 | 2688 | ||
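The new comment block above spells out that single_task_running() gives only a momentary answer for whichever CPU the caller happens to be on. A hedged usage sketch under those constraints (hypothetical caller, not part of the patch):

#include <linux/preempt.h>
#include <linux/sched.h>

/* Treat the result as a hint: it is only meaningful while preemption is
 * disabled, and may be stale the moment we re-enable it. */
static bool foo_can_busy_poll(void)
{
        bool alone;

        preempt_disable();
        alone = single_task_running();
        preempt_enable();

        return alone;
}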
@@ -4924,7 +4934,15 @@ void init_idle(struct task_struct *idle, int cpu) | |||
4924 | idle->state = TASK_RUNNING; | 4934 | idle->state = TASK_RUNNING; |
4925 | idle->se.exec_start = sched_clock(); | 4935 | idle->se.exec_start = sched_clock(); |
4926 | 4936 | ||
4927 | do_set_cpus_allowed(idle, cpumask_of(cpu)); | 4937 | #ifdef CONFIG_SMP |
4938 | /* | ||
4939 | * Its possible that init_idle() gets called multiple times on a task, | ||
4940 | * in that case do_set_cpus_allowed() will not do the right thing. | ||
4941 | * | ||
4942 | * And since this is boot we can forgo the serialization. | ||
4943 | */ | ||
4944 | set_cpus_allowed_common(idle, cpumask_of(cpu)); | ||
4945 | #endif | ||
4928 | /* | 4946 | /* |
4929 | * We're having a chicken and egg problem, even though we are | 4947 | * We're having a chicken and egg problem, even though we are |
4930 | * holding rq->lock, the cpu isn't yet set to this cpu so the | 4948 | * holding rq->lock, the cpu isn't yet set to this cpu so the |
@@ -4941,7 +4959,7 @@ void init_idle(struct task_struct *idle, int cpu) | |||
4941 | 4959 | ||
4942 | rq->curr = rq->idle = idle; | 4960 | rq->curr = rq->idle = idle; |
4943 | idle->on_rq = TASK_ON_RQ_QUEUED; | 4961 | idle->on_rq = TASK_ON_RQ_QUEUED; |
4944 | #if defined(CONFIG_SMP) | 4962 | #ifdef CONFIG_SMP |
4945 | idle->on_cpu = 1; | 4963 | idle->on_cpu = 1; |
4946 | #endif | 4964 | #endif |
4947 | raw_spin_unlock(&rq->lock); | 4965 | raw_spin_unlock(&rq->lock); |
@@ -4956,7 +4974,7 @@ void init_idle(struct task_struct *idle, int cpu) | |||
4956 | idle->sched_class = &idle_sched_class; | 4974 | idle->sched_class = &idle_sched_class; |
4957 | ftrace_graph_init_idle_task(idle, cpu); | 4975 | ftrace_graph_init_idle_task(idle, cpu); |
4958 | vtime_init_idle(idle, cpu); | 4976 | vtime_init_idle(idle, cpu); |
4959 | #if defined(CONFIG_SMP) | 4977 | #ifdef CONFIG_SMP |
4960 | sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); | 4978 | sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); |
4961 | #endif | 4979 | #endif |
4962 | } | 4980 | } |
@@ -5178,24 +5196,47 @@ static void migrate_tasks(struct rq *dead_rq) | |||
5178 | break; | 5196 | break; |
5179 | 5197 | ||
5180 | /* | 5198 | /* |
5181 | * Ensure rq->lock covers the entire task selection | 5199 | * pick_next_task assumes pinned rq->lock. |
5182 | * until the migration. | ||
5183 | */ | 5200 | */ |
5184 | lockdep_pin_lock(&rq->lock); | 5201 | lockdep_pin_lock(&rq->lock); |
5185 | next = pick_next_task(rq, &fake_task); | 5202 | next = pick_next_task(rq, &fake_task); |
5186 | BUG_ON(!next); | 5203 | BUG_ON(!next); |
5187 | next->sched_class->put_prev_task(rq, next); | 5204 | next->sched_class->put_prev_task(rq, next); |
5188 | 5205 | ||
5206 | /* | ||
5207 | * Rules for changing task_struct::cpus_allowed are holding | ||
5208 | * both pi_lock and rq->lock, such that holding either | ||
5209 | * stabilizes the mask. | ||
5210 | * | ||
5211 | * Dropping rq->lock is not quite as disastrous as it usually is | ||
5212 | * because !cpu_active at this point, which means load-balance | ||
5213 | * will not interfere. Also, stop-machine. | ||
5214 | */ | ||
5215 | lockdep_unpin_lock(&rq->lock); | ||
5216 | raw_spin_unlock(&rq->lock); | ||
5217 | raw_spin_lock(&next->pi_lock); | ||
5218 | raw_spin_lock(&rq->lock); | ||
5219 | |||
5220 | /* | ||
5221 | * Since we're inside stop-machine, _nothing_ should have | ||
5222 | * changed the task, WARN if weird stuff happened, because in | ||
5223 | * that case the above rq->lock drop is a fail too. | ||
5224 | */ | ||
5225 | if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { | ||
5226 | raw_spin_unlock(&next->pi_lock); | ||
5227 | continue; | ||
5228 | } | ||
5229 | |||
5189 | /* Find suitable destination for @next, with force if needed. */ | 5230 | /* Find suitable destination for @next, with force if needed. */ |
5190 | dest_cpu = select_fallback_rq(dead_rq->cpu, next); | 5231 | dest_cpu = select_fallback_rq(dead_rq->cpu, next); |
5191 | 5232 | ||
5192 | lockdep_unpin_lock(&rq->lock); | ||
5193 | rq = __migrate_task(rq, next, dest_cpu); | 5233 | rq = __migrate_task(rq, next, dest_cpu); |
5194 | if (rq != dead_rq) { | 5234 | if (rq != dead_rq) { |
5195 | raw_spin_unlock(&rq->lock); | 5235 | raw_spin_unlock(&rq->lock); |
5196 | rq = dead_rq; | 5236 | rq = dead_rq; |
5197 | raw_spin_lock(&rq->lock); | 5237 | raw_spin_lock(&rq->lock); |
5198 | } | 5238 | } |
5239 | raw_spin_unlock(&next->pi_lock); | ||
5199 | } | 5240 | } |
5200 | 5241 | ||
5201 | rq->stop = stop; | 5242 | rq->stop = stop; |
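The migrate_tasks() rework above has to drop rq->lock in order to take next->pi_lock first (pi_lock nests outside rq->lock), and because both locks were momentarily released it re-validates the task before migrating it. A hedged, generic sketch of that drop/relock/re-check idiom (invented names; the real code additionally relies on stop-machine keeping the window quiet):

#include <linux/spinlock.h>

struct foo_item {
        raw_spinlock_t  pi_lock;
        bool            queued;
        int             cpu;
};

/*
 * We arrive holding only *rq_lock but need item->pi_lock too, and the
 * ordering rule says pi_lock first.  Returns false if the item changed
 * while neither lock was held, in which case the caller should retry.
 */
static bool foo_lock_both(raw_spinlock_t *rq_lock, struct foo_item *item,
                          int this_cpu)
{
        raw_spin_unlock(rq_lock);
        raw_spin_lock(&item->pi_lock);
        raw_spin_lock(rq_lock);

        if (item->cpu != this_cpu || !item->queued) {
                raw_spin_unlock(&item->pi_lock);
                return false;
        }
        return true;
}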
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 68cda117574c..6d2a119c7ad9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -1078,9 +1078,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | |||
1078 | * After ->on_cpu is cleared, the task can be moved to a different CPU. | 1078 | * After ->on_cpu is cleared, the task can be moved to a different CPU. |
1079 | * We must ensure this doesn't happen until the switch is completely | 1079 | * We must ensure this doesn't happen until the switch is completely |
1080 | * finished. | 1080 | * finished. |
1081 | * | ||
1082 | * Pairs with the control dependency and rmb in try_to_wake_up(). | ||
1081 | */ | 1083 | */ |
1082 | smp_wmb(); | 1084 | smp_store_release(&prev->on_cpu, 0); |
1083 | prev->on_cpu = 0; | ||
1084 | #endif | 1085 | #endif |
1085 | #ifdef CONFIG_DEBUG_SPINLOCK | 1086 | #ifdef CONFIG_DEBUG_SPINLOCK |
1086 | /* this is a valid case when another task releases the spinlock */ | 1087 | /* this is a valid case when another task releases the spinlock */ |
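The sched.h change above turns the smp_wmb()+plain store into smp_store_release(), one half of a release/acquire pair whose other half is the ->on_cpu polling in try_to_wake_up(). A hedged, stand-alone illustration of the pairing with a generic flag (not the scheduler code itself):

#include <asm/barrier.h>

static int done_flag = 1;       /* think prev->on_cpu */
static int payload;

/* Writer side (cf. finish_lock_switch()): all stores before the release
 * become visible to any reader that observes done_flag == 0. */
static void writer(void)
{
        payload = 42;
        smp_store_release(&done_flag, 0);
}

/* Reader side (cf. try_to_wake_up()): the acquire - or, in the scheduler,
 * a control dependency plus smp_rmb() - orders the later loads. */
static int reader(void)
{
        while (smp_load_acquire(&done_flag) != 0)
                ;
        return payload;         /* guaranteed to observe 42 */
}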
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 272d9322bc5d..052e02672d12 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c | |||
@@ -106,10 +106,9 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr) | |||
106 | } | 106 | } |
107 | EXPORT_SYMBOL_GPL(__wake_up_locked); | 107 | EXPORT_SYMBOL_GPL(__wake_up_locked); |
108 | 108 | ||
109 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, | 109 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) |
110 | void *key) | ||
111 | { | 110 | { |
112 | __wake_up_common(q, mode, nr, 0, key); | 111 | __wake_up_common(q, mode, 1, 0, key); |
113 | } | 112 | } |
114 | EXPORT_SYMBOL_GPL(__wake_up_locked_key); | 113 | EXPORT_SYMBOL_GPL(__wake_up_locked_key); |
115 | 114 | ||
@@ -284,7 +283,7 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, | |||
284 | if (!list_empty(&wait->task_list)) | 283 | if (!list_empty(&wait->task_list)) |
285 | list_del_init(&wait->task_list); | 284 | list_del_init(&wait->task_list); |
286 | else if (waitqueue_active(q)) | 285 | else if (waitqueue_active(q)) |
287 | __wake_up_locked_key(q, mode, 1, key); | 286 | __wake_up_locked_key(q, mode, key); |
288 | spin_unlock_irqrestore(&q->lock, flags); | 287 | spin_unlock_irqrestore(&q->lock, flags); |
289 | } | 288 | } |
290 | EXPORT_SYMBOL(abort_exclusive_wait); | 289 | EXPORT_SYMBOL(abort_exclusive_wait); |
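Following the wait.c prototype change above, __wake_up_locked_key() no longer takes nr_exclusive and always behaves as a single wake-up, so callers simply drop the '1' as abort_exclusive_wait() does. A hedged sketch of an updated out-of-tree caller (hypothetical function name):

#include <linux/sched.h>
#include <linux/wait.h>

/* Wake one waiter on q, taking q->lock ourselves. */
static void foo_wake_one(wait_queue_head_t *q, void *key)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_locked_key(q, TASK_NORMAL, key);      /* was (q, mode, 1, key) */
        spin_unlock_irqrestore(&q->lock, flags);
}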
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 50eb107f1198..a9b76a40319e 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
@@ -97,20 +97,6 @@ EXPORT_SYMBOL_GPL(clockevent_delta2ns); | |||
97 | static int __clockevents_switch_state(struct clock_event_device *dev, | 97 | static int __clockevents_switch_state(struct clock_event_device *dev, |
98 | enum clock_event_state state) | 98 | enum clock_event_state state) |
99 | { | 99 | { |
100 | /* Transition with legacy set_mode() callback */ | ||
101 | if (dev->set_mode) { | ||
102 | /* Legacy callback doesn't support new modes */ | ||
103 | if (state > CLOCK_EVT_STATE_ONESHOT) | ||
104 | return -ENOSYS; | ||
105 | /* | ||
106 | * 'clock_event_state' and 'clock_event_mode' have 1-to-1 | ||
107 | * mapping until *_ONESHOT, and so a simple cast will work. | ||
108 | */ | ||
109 | dev->set_mode((enum clock_event_mode)state, dev); | ||
110 | dev->mode = (enum clock_event_mode)state; | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | if (dev->features & CLOCK_EVT_FEAT_DUMMY) | 100 | if (dev->features & CLOCK_EVT_FEAT_DUMMY) |
115 | return 0; | 101 | return 0; |
116 | 102 | ||
@@ -204,12 +190,8 @@ int clockevents_tick_resume(struct clock_event_device *dev) | |||
204 | { | 190 | { |
205 | int ret = 0; | 191 | int ret = 0; |
206 | 192 | ||
207 | if (dev->set_mode) { | 193 | if (dev->tick_resume) |
208 | dev->set_mode(CLOCK_EVT_MODE_RESUME, dev); | ||
209 | dev->mode = CLOCK_EVT_MODE_RESUME; | ||
210 | } else if (dev->tick_resume) { | ||
211 | ret = dev->tick_resume(dev); | 194 | ret = dev->tick_resume(dev); |
212 | } | ||
213 | 195 | ||
214 | return ret; | 196 | return ret; |
215 | } | 197 | } |
@@ -460,26 +442,6 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu) | |||
460 | } | 442 | } |
461 | EXPORT_SYMBOL_GPL(clockevents_unbind_device); | 443 | EXPORT_SYMBOL_GPL(clockevents_unbind_device); |
462 | 444 | ||
463 | /* Sanity check of state transition callbacks */ | ||
464 | static int clockevents_sanity_check(struct clock_event_device *dev) | ||
465 | { | ||
466 | /* Legacy set_mode() callback */ | ||
467 | if (dev->set_mode) { | ||
468 | /* We shouldn't be supporting new modes now */ | ||
469 | WARN_ON(dev->set_state_periodic || dev->set_state_oneshot || | ||
470 | dev->set_state_shutdown || dev->tick_resume || | ||
471 | dev->set_state_oneshot_stopped); | ||
472 | |||
473 | BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); | ||
474 | return 0; | ||
475 | } | ||
476 | |||
477 | if (dev->features & CLOCK_EVT_FEAT_DUMMY) | ||
478 | return 0; | ||
479 | |||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | /** | 445 | /** |
484 | * clockevents_register_device - register a clock event device | 446 | * clockevents_register_device - register a clock event device |
485 | * @dev: device to register | 447 | * @dev: device to register |
@@ -488,8 +450,6 @@ void clockevents_register_device(struct clock_event_device *dev) | |||
488 | { | 450 | { |
489 | unsigned long flags; | 451 | unsigned long flags; |
490 | 452 | ||
491 | BUG_ON(clockevents_sanity_check(dev)); | ||
492 | |||
493 | /* Initialize state to DETACHED */ | 453 | /* Initialize state to DETACHED */ |
494 | clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED); | 454 | clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED); |
495 | 455 | ||
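With the legacy ->set_mode() path removed from __clockevents_switch_state(), clockevents_tick_resume() and the registration sanity check, a clock event driver is expected to supply the per-state callbacks directly. A hedged sketch of a minimal registration using those fields (invented device; the field names match what the timer_list.c hunk further down now prints):

#include <linux/clockchips.h>

static int foo_shutdown(struct clock_event_device *evt)
{
        /* stop the hardware timer */
        return 0;
}

static int foo_set_periodic(struct clock_event_device *evt)
{
        /* program the periodic tick */
        return 0;
}

static int foo_set_oneshot(struct clock_event_device *evt)
{
        /* switch the hardware to one-shot mode */
        return 0;
}

static int foo_next_event(unsigned long delta, struct clock_event_device *evt)
{
        /* arm the timer 'delta' cycles from now */
        return 0;
}

static struct clock_event_device foo_clockevent = {
        .name                   = "foo-timer",
        .features               = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .rating                 = 300,
        .set_state_shutdown     = foo_shutdown,
        .set_state_periodic     = foo_set_periodic,
        .set_state_oneshot      = foo_set_oneshot,
        .tick_resume            = foo_shutdown,         /* often shares the shutdown path */
        .set_next_event         = foo_next_event,
};

/* Registered e.g. with:
 *      clockevents_config_and_register(&foo_clockevent, rate, 2, 0x7fffffff);
 */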
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 841b72f720e8..3a38775b50c2 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data) | |||
217 | continue; | 217 | continue; |
218 | 218 | ||
219 | /* Check the deviation from the watchdog clocksource. */ | 219 | /* Check the deviation from the watchdog clocksource. */ |
220 | if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) { | 220 | if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { |
221 | pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n", | 221 | pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n", |
222 | cs->name); | 222 | cs->name); |
223 | pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", | 223 | pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", |
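The abs() -> abs64() change above matters because cs_nsec - wd_nsec is an s64 while, at the time, the kernel's abs() evaluated its argument at int/long width; on 32-bit builds a large skew could therefore be truncated and slip under WATCHDOG_THRESHOLD. A hedged userspace illustration of the truncation (the 32-bit narrowing is modelled explicitly):

#include <stdio.h>
#include <stdint.h>

/* Model of a 32-bit abs(): the value is narrowed before the sign strip. */
static int32_t abs_32(int64_t x)
{
        int32_t v = (int32_t)x;         /* truncation happens here */
        return v < 0 ? -v : v;
}

static int64_t abs_64(int64_t x)
{
        return x < 0 ? -x : x;
}

int main(void)
{
        int64_t skew = (int64_t)1 << 32;        /* ~4.3 s skew, low 32 bits all zero */

        printf("32-bit abs: %d ns\n", abs_32(skew));            /* reads as 0 */
        printf("abs64:      %lld ns\n", (long long)abs_64(skew));
        return 0;
}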
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index d11c55b6ab7d..4fcd99e12aa0 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -398,7 +398,6 @@ void tick_shutdown(unsigned int cpu) | |||
398 | * the set mode function! | 398 | * the set mode function! |
399 | */ | 399 | */ |
400 | clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED); | 400 | clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED); |
401 | dev->mode = CLOCK_EVT_MODE_UNUSED; | ||
402 | clockevents_exchange_device(dev, NULL); | 401 | clockevents_exchange_device(dev, NULL); |
403 | dev->event_handler = clockevents_handle_noop; | 402 | dev->event_handler = clockevents_handle_noop; |
404 | td->evtdev = NULL; | 403 | td->evtdev = NULL; |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 3319e16f31e5..7c7ec4515983 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -290,16 +290,17 @@ static int __init tick_nohz_full_setup(char *str) | |||
290 | __setup("nohz_full=", tick_nohz_full_setup); | 290 | __setup("nohz_full=", tick_nohz_full_setup); |
291 | 291 | ||
292 | static int tick_nohz_cpu_down_callback(struct notifier_block *nfb, | 292 | static int tick_nohz_cpu_down_callback(struct notifier_block *nfb, |
293 | unsigned long action, | 293 | unsigned long action, |
294 | void *hcpu) | 294 | void *hcpu) |
295 | { | 295 | { |
296 | unsigned int cpu = (unsigned long)hcpu; | 296 | unsigned int cpu = (unsigned long)hcpu; |
297 | 297 | ||
298 | switch (action & ~CPU_TASKS_FROZEN) { | 298 | switch (action & ~CPU_TASKS_FROZEN) { |
299 | case CPU_DOWN_PREPARE: | 299 | case CPU_DOWN_PREPARE: |
300 | /* | 300 | /* |
301 | * If we handle the timekeeping duty for full dynticks CPUs, | 301 | * The boot CPU handles housekeeping duty (unbound timers, |
302 | * we can't safely shutdown that CPU. | 302 | * workqueues, timekeeping, ...) on behalf of full dynticks |
303 | * CPUs. It must remain online when nohz full is enabled. | ||
303 | */ | 304 | */ |
304 | if (tick_nohz_full_running && tick_do_timer_cpu == cpu) | 305 | if (tick_nohz_full_running && tick_do_timer_cpu == cpu) |
305 | return NOTIFY_BAD; | 306 | return NOTIFY_BAD; |
@@ -370,6 +371,12 @@ void __init tick_nohz_init(void) | |||
370 | cpu_notifier(tick_nohz_cpu_down_callback, 0); | 371 | cpu_notifier(tick_nohz_cpu_down_callback, 0); |
371 | pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n", | 372 | pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n", |
372 | cpumask_pr_args(tick_nohz_full_mask)); | 373 | cpumask_pr_args(tick_nohz_full_mask)); |
374 | |||
375 | /* | ||
376 | * We need at least one CPU to handle housekeeping work such | ||
377 | * as timekeeping, unbound timers, workqueues, ... | ||
378 | */ | ||
379 | WARN_ON_ONCE(cpumask_empty(housekeeping_mask)); | ||
373 | } | 380 | } |
374 | #endif | 381 | #endif |
375 | 382 | ||
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index f6ee2e6b6f5d..44d2cc0436f4 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -1251,7 +1251,7 @@ void __init timekeeping_init(void) | |||
1251 | set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec); | 1251 | set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec); |
1252 | tk_set_wall_to_mono(tk, tmp); | 1252 | tk_set_wall_to_mono(tk, tmp); |
1253 | 1253 | ||
1254 | timekeeping_update(tk, TK_MIRROR); | 1254 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); |
1255 | 1255 | ||
1256 | write_seqcount_end(&tk_core.seq); | 1256 | write_seqcount_end(&tk_core.seq); |
1257 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1257 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
@@ -1614,7 +1614,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk, | |||
1614 | negative = (tick_error < 0); | 1614 | negative = (tick_error < 0); |
1615 | 1615 | ||
1616 | /* Sort out the magnitude of the correction */ | 1616 | /* Sort out the magnitude of the correction */ |
1617 | tick_error = abs(tick_error); | 1617 | tick_error = abs64(tick_error); |
1618 | for (adj = 0; tick_error > interval; adj++) | 1618 | for (adj = 0; tick_error > interval; adj++) |
1619 | tick_error >>= 1; | 1619 | tick_error >>= 1; |
1620 | 1620 | ||
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 129c96033e46..f75e35b60149 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c | |||
@@ -225,7 +225,7 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) | |||
225 | (unsigned long long) dev->min_delta_ns); | 225 | (unsigned long long) dev->min_delta_ns); |
226 | SEQ_printf(m, " mult: %u\n", dev->mult); | 226 | SEQ_printf(m, " mult: %u\n", dev->mult); |
227 | SEQ_printf(m, " shift: %u\n", dev->shift); | 227 | SEQ_printf(m, " shift: %u\n", dev->shift); |
228 | SEQ_printf(m, " mode: %d\n", dev->mode); | 228 | SEQ_printf(m, " mode: %d\n", clockevent_get_state(dev)); |
229 | SEQ_printf(m, " next_event: %Ld nsecs\n", | 229 | SEQ_printf(m, " next_event: %Ld nsecs\n", |
230 | (unsigned long long) ktime_to_ns(dev->next_event)); | 230 | (unsigned long long) ktime_to_ns(dev->next_event)); |
231 | 231 | ||
@@ -233,40 +233,34 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) | |||
233 | print_name_offset(m, dev->set_next_event); | 233 | print_name_offset(m, dev->set_next_event); |
234 | SEQ_printf(m, "\n"); | 234 | SEQ_printf(m, "\n"); |
235 | 235 | ||
236 | if (dev->set_mode) { | 236 | if (dev->set_state_shutdown) { |
237 | SEQ_printf(m, " set_mode: "); | 237 | SEQ_printf(m, " shutdown: "); |
238 | print_name_offset(m, dev->set_mode); | 238 | print_name_offset(m, dev->set_state_shutdown); |
239 | SEQ_printf(m, "\n"); | 239 | SEQ_printf(m, "\n"); |
240 | } else { | 240 | } |
241 | if (dev->set_state_shutdown) { | ||
242 | SEQ_printf(m, " shutdown: "); | ||
243 | print_name_offset(m, dev->set_state_shutdown); | ||
244 | SEQ_printf(m, "\n"); | ||
245 | } | ||
246 | 241 | ||
247 | if (dev->set_state_periodic) { | 242 | if (dev->set_state_periodic) { |
248 | SEQ_printf(m, " periodic: "); | 243 | SEQ_printf(m, " periodic: "); |
249 | print_name_offset(m, dev->set_state_periodic); | 244 | print_name_offset(m, dev->set_state_periodic); |
250 | SEQ_printf(m, "\n"); | 245 | SEQ_printf(m, "\n"); |
251 | } | 246 | } |
252 | 247 | ||
253 | if (dev->set_state_oneshot) { | 248 | if (dev->set_state_oneshot) { |
254 | SEQ_printf(m, " oneshot: "); | 249 | SEQ_printf(m, " oneshot: "); |
255 | print_name_offset(m, dev->set_state_oneshot); | 250 | print_name_offset(m, dev->set_state_oneshot); |
256 | SEQ_printf(m, "\n"); | 251 | SEQ_printf(m, "\n"); |
257 | } | 252 | } |
258 | 253 | ||
259 | if (dev->set_state_oneshot_stopped) { | 254 | if (dev->set_state_oneshot_stopped) { |
260 | SEQ_printf(m, " oneshot stopped: "); | 255 | SEQ_printf(m, " oneshot stopped: "); |
261 | print_name_offset(m, dev->set_state_oneshot_stopped); | 256 | print_name_offset(m, dev->set_state_oneshot_stopped); |
262 | SEQ_printf(m, "\n"); | 257 | SEQ_printf(m, "\n"); |
263 | } | 258 | } |
264 | 259 | ||
265 | if (dev->tick_resume) { | 260 | if (dev->tick_resume) { |
266 | SEQ_printf(m, " resume: "); | 261 | SEQ_printf(m, " resume: "); |
267 | print_name_offset(m, dev->tick_resume); | 262 | print_name_offset(m, dev->tick_resume); |
268 | SEQ_printf(m, "\n"); | 263 | SEQ_printf(m, "\n"); |
269 | } | ||
270 | } | 264 | } |
271 | 265 | ||
272 | SEQ_printf(m, " event_handler: "); | 266 | SEQ_printf(m, " event_handler: "); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ca71582fcfab..bcb14cafe007 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -1458,13 +1458,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, | |||
1458 | timer_stats_timer_set_start_info(&dwork->timer); | 1458 | timer_stats_timer_set_start_info(&dwork->timer); |
1459 | 1459 | ||
1460 | dwork->wq = wq; | 1460 | dwork->wq = wq; |
1461 | /* timer isn't guaranteed to run in this cpu, record earlier */ | ||
1462 | if (cpu == WORK_CPU_UNBOUND) | ||
1463 | cpu = raw_smp_processor_id(); | ||
1461 | dwork->cpu = cpu; | 1464 | dwork->cpu = cpu; |
1462 | timer->expires = jiffies + delay; | 1465 | timer->expires = jiffies + delay; |
1463 | 1466 | ||
1464 | if (unlikely(cpu != WORK_CPU_UNBOUND)) | 1467 | add_timer_on(timer, cpu); |
1465 | add_timer_on(timer, cpu); | ||
1466 | else | ||
1467 | add_timer(timer); | ||
1468 | } | 1468 | } |
1469 | 1469 | ||
1470 | /** | 1470 | /** |
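
The workqueue.c hunk moves the WORK_CPU_UNBOUND resolution ahead of recording dwork->cpu, so the stored CPU always matches the CPU the timer is armed on, and add_timer_on() can be used unconditionally. A small sketch of that ordering, using made-up names (ANY_CPU, current_cpu, arm_timer_on are placeholders, not kernel APIs):

	#include <stdio.h>

	#define ANY_CPU (-1)

	struct delayed_item {
		int cpu;                 /* CPU the work will run on when the timer fires */
		unsigned long expires;
	};

	static int current_cpu(void) { return 3; }  /* stand-in for raw_smp_processor_id() */

	static void arm_timer_on(struct delayed_item *it, int cpu)
	{
		printf("timer armed on CPU %d, expires %lu, recorded cpu %d\n",
		       cpu, it->expires, it->cpu);
	}

	static void queue_delayed(struct delayed_item *it, int cpu,
				  unsigned long now, unsigned long delay)
	{
		/* Resolve "any CPU" to a concrete CPU before recording it, as the
		 * patch does; the timer is then always armed on that CPU. */
		if (cpu == ANY_CPU)
			cpu = current_cpu();
		it->cpu = cpu;
		it->expires = now + delay;
		arm_timer_on(it, cpu);
	}

	int main(void)
	{
		struct delayed_item it;
		queue_delayed(&it, ANY_CPU, 1000, 50);
		return 0;
	}
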
diff --git a/lib/Kconfig b/lib/Kconfig index 2e491ac15622..f0df318104e7 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -220,6 +220,7 @@ config ZLIB_INFLATE | |||
220 | 220 | ||
221 | config ZLIB_DEFLATE | 221 | config ZLIB_DEFLATE |
222 | tristate | 222 | tristate |
223 | select BITREVERSE | ||
223 | 224 | ||
224 | config LZO_COMPRESS | 225 | config LZO_COMPRESS |
225 | tristate | 226 | tristate |
diff --git a/lib/iommu-common.c b/lib/iommu-common.c index ff19f66d3f7f..b1c93e94ca7a 100644 --- a/lib/iommu-common.c +++ b/lib/iommu-common.c | |||
@@ -21,8 +21,7 @@ static DEFINE_PER_CPU(unsigned int, iommu_hash_common); | |||
21 | 21 | ||
22 | static inline bool need_flush(struct iommu_map_table *iommu) | 22 | static inline bool need_flush(struct iommu_map_table *iommu) |
23 | { | 23 | { |
24 | return (iommu->lazy_flush != NULL && | 24 | return ((iommu->flags & IOMMU_NEED_FLUSH) != 0); |
25 | (iommu->flags & IOMMU_NEED_FLUSH) != 0); | ||
26 | } | 25 | } |
27 | 26 | ||
28 | static inline void set_flush(struct iommu_map_table *iommu) | 27 | static inline void set_flush(struct iommu_map_table *iommu) |
@@ -211,7 +210,8 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, | |||
211 | goto bail; | 210 | goto bail; |
212 | } | 211 | } |
213 | } | 212 | } |
214 | if (n < pool->hint || need_flush(iommu)) { | 213 | if (iommu->lazy_flush && |
214 | (n < pool->hint || need_flush(iommu))) { | ||
215 | clear_flush(iommu); | 215 | clear_flush(iommu); |
216 | iommu->lazy_flush(iommu); | 216 | iommu->lazy_flush(iommu); |
217 | } | 217 | } |
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index cc0c69710dcf..a54ff8949f91 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -187,10 +187,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) | |||
187 | head = rht_dereference_bucket(new_tbl->buckets[new_hash], | 187 | head = rht_dereference_bucket(new_tbl->buckets[new_hash], |
188 | new_tbl, new_hash); | 188 | new_tbl, new_hash); |
189 | 189 | ||
190 | if (rht_is_a_nulls(head)) | 190 | RCU_INIT_POINTER(entry->next, head); |
191 | INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash); | ||
192 | else | ||
193 | RCU_INIT_POINTER(entry->next, head); | ||
194 | 191 | ||
195 | rcu_assign_pointer(new_tbl->buckets[new_hash], entry); | 192 | rcu_assign_pointer(new_tbl->buckets[new_hash], entry); |
196 | spin_unlock(new_bucket_lock); | 193 | spin_unlock(new_bucket_lock); |
diff --git a/lib/string.c b/lib/string.c index 13d1e84ddb80..84775ba873b9 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -27,6 +27,10 @@ | |||
27 | #include <linux/bug.h> | 27 | #include <linux/bug.h> |
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | 29 | ||
30 | #include <asm/byteorder.h> | ||
31 | #include <asm/word-at-a-time.h> | ||
32 | #include <asm/page.h> | ||
33 | |||
30 | #ifndef __HAVE_ARCH_STRNCASECMP | 34 | #ifndef __HAVE_ARCH_STRNCASECMP |
31 | /** | 35 | /** |
32 | * strncasecmp - Case insensitive, length-limited string comparison | 36 | * strncasecmp - Case insensitive, length-limited string comparison |
@@ -146,6 +150,91 @@ size_t strlcpy(char *dest, const char *src, size_t size) | |||
146 | EXPORT_SYMBOL(strlcpy); | 150 | EXPORT_SYMBOL(strlcpy); |
147 | #endif | 151 | #endif |
148 | 152 | ||
153 | #ifndef __HAVE_ARCH_STRSCPY | ||
154 | /** | ||
155 | * strscpy - Copy a C-string into a sized buffer | ||
156 | * @dest: Where to copy the string to | ||
157 | * @src: Where to copy the string from | ||
158 | * @count: Size of destination buffer | ||
159 | * | ||
160 | * Copy the string, or as much of it as fits, into the dest buffer. | ||
161 | * The routine returns the number of characters copied (not including | ||
162 | * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. | ||
163 | * The behavior is undefined if the string buffers overlap. | ||
164 | * The destination buffer is always NUL terminated, unless it's zero-sized. | ||
165 | * | ||
166 | * Preferred to strlcpy() since the API doesn't require reading memory | ||
167 | * from the src string beyond the specified "count" bytes, and since | ||
168 | * the return value is easier to error-check than strlcpy()'s. | ||
169 | * In addition, the implementation is robust to the string changing out | ||
170 | * from underneath it, unlike the current strlcpy() implementation. | ||
171 | * | ||
172 | * Preferred to strncpy() since it always returns a valid string, and | ||
173 | * doesn't unnecessarily force the tail of the destination buffer to be | ||
174 | * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() | ||
175 | * with an overflow test, then just memset() the tail of the dest buffer. | ||
176 | */ | ||
177 | ssize_t strscpy(char *dest, const char *src, size_t count) | ||
178 | { | ||
179 | const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; | ||
180 | size_t max = count; | ||
181 | long res = 0; | ||
182 | |||
183 | if (count == 0) | ||
184 | return -E2BIG; | ||
185 | |||
186 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
187 | /* | ||
188 | * If src is unaligned, don't cross a page boundary, | ||
189 | * since we don't know if the next page is mapped. | ||
190 | */ | ||
191 | if ((long)src & (sizeof(long) - 1)) { | ||
192 | size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1)); | ||
193 | if (limit < max) | ||
194 | max = limit; | ||
195 | } | ||
196 | #else | ||
197 | /* If src or dest is unaligned, don't do word-at-a-time. */ | ||
198 | if (((long) dest | (long) src) & (sizeof(long) - 1)) | ||
199 | max = 0; | ||
200 | #endif | ||
201 | |||
202 | while (max >= sizeof(unsigned long)) { | ||
203 | unsigned long c, data; | ||
204 | |||
205 | c = *(unsigned long *)(src+res); | ||
206 | if (has_zero(c, &data, &constants)) { | ||
207 | data = prep_zero_mask(c, data, &constants); | ||
208 | data = create_zero_mask(data); | ||
209 | *(unsigned long *)(dest+res) = c & zero_bytemask(data); | ||
210 | return res + find_zero(data); | ||
211 | } | ||
212 | *(unsigned long *)(dest+res) = c; | ||
213 | res += sizeof(unsigned long); | ||
214 | count -= sizeof(unsigned long); | ||
215 | max -= sizeof(unsigned long); | ||
216 | } | ||
217 | |||
218 | while (count) { | ||
219 | char c; | ||
220 | |||
221 | c = src[res]; | ||
222 | dest[res] = c; | ||
223 | if (!c) | ||
224 | return res; | ||
225 | res++; | ||
226 | count--; | ||
227 | } | ||
228 | |||
229 | /* Hit buffer length without finding a NUL; force NUL-termination. */ | ||
230 | if (res) | ||
231 | dest[res-1] = '\0'; | ||
232 | |||
233 | return -E2BIG; | ||
234 | } | ||
235 | EXPORT_SYMBOL(strscpy); | ||
236 | #endif | ||
237 | |||
149 | #ifndef __HAVE_ARCH_STRCAT | 238 | #ifndef __HAVE_ARCH_STRCAT |
150 | /** | 239 | /** |
151 | * strcat - Append one %NUL-terminated string to another | 240 | * strcat - Append one %NUL-terminated string to another |
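
Given the semantics documented in the new strscpy() above (returns the copied length, or -E2BIG on truncation, and always NUL-terminates a non-empty buffer), a caller can check a single return value instead of re-deriving the source length as with strlcpy(). A hedged usage sketch; the strscpy() below is a userspace stand-in with the same contract, written only for illustration:

	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>

	#define E2BIG 7

	/* Userspace stand-in mirroring the kernel strscpy() contract. */
	static ssize_t strscpy(char *dest, const char *src, size_t count)
	{
		size_t len = strlen(src);

		if (count == 0)
			return -E2BIG;
		if (len >= count) {
			memcpy(dest, src, count - 1);
			dest[count - 1] = '\0';      /* truncated but NUL-terminated */
			return -E2BIG;
		}
		memcpy(dest, src, len + 1);
		return (ssize_t)len;
	}

	int main(void)
	{
		char name[8];
		ssize_t ret = strscpy(name, "watchdog0", sizeof(name));

		if (ret < 0)
			printf("truncated: \"%s\"\n", name);          /* "watchdo" */
		else
			printf("copied %zd bytes: \"%s\"\n", ret, name);
		return 0;
	}
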
diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 54036ce2e2dd..5939f63d90cd 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c | |||
@@ -59,7 +59,11 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units, | |||
59 | } | 59 | } |
60 | 60 | ||
61 | exp = divisor[units] / (u32)blk_size; | 61 | exp = divisor[units] / (u32)blk_size; |
62 | if (size >= exp) { | 62 | /* |
63 | * size must be strictly greater than exp here to ensure that remainder | ||
64 | * is greater than divisor[units] coming out of the if below. | ||
65 | */ | ||
66 | if (size > exp) { | ||
63 | remainder = do_div(size, divisor[units]); | 67 | remainder = do_div(size, divisor[units]); |
64 | remainder *= blk_size; | 68 | remainder *= blk_size; |
65 | i++; | 69 | i++; |
diff --git a/mm/dmapool.c b/mm/dmapool.c index 71a8998cd03a..312a716fa14c 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c | |||
@@ -394,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) | |||
394 | list_for_each_entry(page, &pool->page_list, page_list) { | 394 | list_for_each_entry(page, &pool->page_list, page_list) { |
395 | if (dma < page->dma) | 395 | if (dma < page->dma) |
396 | continue; | 396 | continue; |
397 | if (dma < (page->dma + pool->allocation)) | 397 | if ((dma - page->dma) < pool->allocation) |
398 | return page; | 398 | return page; |
399 | } | 399 | } |
400 | return NULL; | 400 | return NULL; |
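
The dmapool.c change rewrites the range test so it subtracts instead of adding: since dma < page->dma has already been ruled out, (dma - page->dma) cannot underflow, while page->dma + pool->allocation could wrap when the page sits near the top of the DMA address space. A tiny worked example of the boundary case, using 32-bit addresses purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t page_dma   = 0xfffff000u;  /* page mapped at the top of a 32-bit DMA space */
		uint32_t allocation = 0x2000u;      /* 8 KiB pool allocation size */
		uint32_t dma        = 0xfffff800u;  /* address inside that page */

		/* Old form: page_dma + allocation wraps to 0x1000, so the test
		 * "dma < page_dma + allocation" wrongly fails for an in-range dma. */
		printf("old test: %d\n", dma < (uint32_t)(page_dma + allocation));

		/* New form: dma >= page_dma was already checked, so the subtraction
		 * cannot underflow and never overflows either. */
		printf("new test: %d\n", (uint32_t)(dma - page_dma) < allocation);
		return 0;
	}
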
diff --git a/mm/filemap.c b/mm/filemap.c index 72940fb38666..1cc5467cf36c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -2473,6 +2473,21 @@ ssize_t generic_perform_write(struct file *file, | |||
2473 | iov_iter_count(i)); | 2473 | iov_iter_count(i)); |
2474 | 2474 | ||
2475 | again: | 2475 | again: |
2476 | /* | ||
2477 | * Bring in the user page that we will copy from _first_. | ||
2478 | * Otherwise there's a nasty deadlock on copying from the | ||
2479 | * same page as we're writing to, without it being marked | ||
2480 | * up-to-date. | ||
2481 | * | ||
2482 | * Not only is this an optimisation, but it is also required | ||
2483 | * to check that the address is actually valid, when atomic | ||
2484 | * usercopies are used, below. | ||
2485 | */ | ||
2486 | if (unlikely(iov_iter_fault_in_readable(i, bytes))) { | ||
2487 | status = -EFAULT; | ||
2488 | break; | ||
2489 | } | ||
2490 | |||
2476 | status = a_ops->write_begin(file, mapping, pos, bytes, flags, | 2491 | status = a_ops->write_begin(file, mapping, pos, bytes, flags, |
2477 | &page, &fsdata); | 2492 | &page, &fsdata); |
2478 | if (unlikely(status < 0)) | 2493 | if (unlikely(status < 0)) |
@@ -2480,17 +2495,8 @@ again: | |||
2480 | 2495 | ||
2481 | if (mapping_writably_mapped(mapping)) | 2496 | if (mapping_writably_mapped(mapping)) |
2482 | flush_dcache_page(page); | 2497 | flush_dcache_page(page); |
2483 | /* | 2498 | |
2484 | * 'page' is now locked. If we are trying to copy from a | ||
2485 | * mapping of 'page' in userspace, the copy might fault and | ||
2486 | * would need PageUptodate() to complete. But, page can not be | ||
2487 | * made Uptodate without acquiring the page lock, which we hold. | ||
2488 | * Deadlock. Avoid with pagefault_disable(). Fix up below with | ||
2489 | * iov_iter_fault_in_readable(). | ||
2490 | */ | ||
2491 | pagefault_disable(); | ||
2492 | copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); | 2499 | copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); |
2493 | pagefault_enable(); | ||
2494 | flush_dcache_page(page); | 2500 | flush_dcache_page(page); |
2495 | 2501 | ||
2496 | status = a_ops->write_end(file, mapping, pos, bytes, copied, | 2502 | status = a_ops->write_end(file, mapping, pos, bytes, copied, |
@@ -2513,14 +2519,6 @@ again: | |||
2513 | */ | 2519 | */ |
2514 | bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, | 2520 | bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, |
2515 | iov_iter_single_seg_count(i)); | 2521 | iov_iter_single_seg_count(i)); |
2516 | /* | ||
2517 | * This is the fallback to recover if the copy from | ||
2518 | * userspace above faults. | ||
2519 | */ | ||
2520 | if (unlikely(iov_iter_fault_in_readable(i, bytes))) { | ||
2521 | status = -EFAULT; | ||
2522 | break; | ||
2523 | } | ||
2524 | goto again; | 2522 | goto again; |
2525 | } | 2523 | } |
2526 | pos += copied; | 2524 | pos += copied; |
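
The filemap.c hunk moves iov_iter_fault_in_readable() in front of write_begin(): once write_begin() has locked the destination page, faulting in a source page that happens to be the same page would need the very lock we hold. With atomic usercopies the prefault is also the only address check. A shape-only sketch of the resulting loop ordering; all helpers below are stubs named after their roles, not kernel APIs:

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	static int  fault_in_readable(const char *src, size_t n) { (void)src; (void)n; return 0; }
	static void lock_dest_page(void)   { }   /* what write_begin() does */
	static void unlock_dest_page(void) { }   /* what write_end() does   */

	static long perform_write(char *dst, const char *src, size_t n)
	{
		/* 1. Prefault the source while no page lock is held; this also
		 *    validates the address before the atomic copy below. */
		if (fault_in_readable(src, n))
			return -14;                  /* -EFAULT */
		lock_dest_page();                    /* 2. destination page locked    */
		memcpy(dst, src, n);                 /* 3. atomic usercopy stand-in   */
		unlock_dest_page();                  /* 4. unlock before any retry    */
		return (long)n;
	}

	int main(void)
	{
		char dst[16];
		long ret = perform_write(dst, "hello", 6);

		printf("copied %ld bytes: %s\n", ret, dst);
		return 0;
	}
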
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 999fb0aef8f1..9cc773483624 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -3202,6 +3202,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3202 | continue; | 3202 | continue; |
3203 | 3203 | ||
3204 | /* | 3204 | /* |
3205 | * Shared VMAs have their own reserves and do not affect | ||
3206 | * MAP_PRIVATE accounting but it is possible that a shared | ||
3207 | * VMA is using the same page so check and skip such VMAs. | ||
3208 | */ | ||
3209 | if (iter_vma->vm_flags & VM_MAYSHARE) | ||
3210 | continue; | ||
3211 | |||
3212 | /* | ||
3205 | * Unmap the page from other VMAs without their own reserves. | 3213 | * Unmap the page from other VMAs without their own reserves. |
3206 | * They get marked to be SIGKILLed if they fault in these | 3214 | * They get marked to be SIGKILLed if they fault in these |
3207 | * areas. This is because a future no-page fault on this VMA | 3215 | * areas. This is because a future no-page fault on this VMA |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 7b28e9cdf1c7..8da211411b57 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
@@ -135,12 +135,11 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr) | |||
135 | 135 | ||
136 | if (unlikely(*shadow_addr)) { | 136 | if (unlikely(*shadow_addr)) { |
137 | u16 shadow_first_bytes = *(u16 *)shadow_addr; | 137 | u16 shadow_first_bytes = *(u16 *)shadow_addr; |
138 | s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK; | ||
139 | 138 | ||
140 | if (unlikely(shadow_first_bytes)) | 139 | if (unlikely(shadow_first_bytes)) |
141 | return true; | 140 | return true; |
142 | 141 | ||
143 | if (likely(!last_byte)) | 142 | if (likely(IS_ALIGNED(addr, 8))) |
144 | return false; | 143 | return false; |
145 | 144 | ||
146 | return memory_is_poisoned_1(addr + 15); | 145 | return memory_is_poisoned_1(addr + 15); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6ddaeba34e09..d9b5c817dce8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -644,12 +644,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) | |||
644 | } | 644 | } |
645 | 645 | ||
646 | /* | 646 | /* |
647 | * Return page count for single (non recursive) @memcg. | ||
648 | * | ||
647 | * Implementation Note: reading percpu statistics for memcg. | 649 | * Implementation Note: reading percpu statistics for memcg. |
648 | * | 650 | * |
649 | * Both of vmstat[] and percpu_counter has threshold and do periodic | 651 | * Both of vmstat[] and percpu_counter has threshold and do periodic |
650 | * synchronization to implement "quick" read. There are trade-off between | 652 | * synchronization to implement "quick" read. There are trade-off between |
651 | * reading cost and precision of value. Then, we may have a chance to implement | 653 | * reading cost and precision of value. Then, we may have a chance to implement |
652 | * a periodic synchronizion of counter in memcg's counter. | 654 | * a periodic synchronization of counter in memcg's counter. |
653 | * | 655 | * |
654 | * But this _read() function is used for user interface now. The user accounts | 656 | * But this _read() function is used for user interface now. The user accounts |
655 | * memory usage by memory cgroup and he _always_ requires exact value because | 657 | * memory usage by memory cgroup and he _always_ requires exact value because |
@@ -659,17 +661,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) | |||
659 | * | 661 | * |
660 | * If there are kernel internal actions which can make use of some not-exact | 662 | * If there are kernel internal actions which can make use of some not-exact |
661 | * value, and reading all cpu value can be performance bottleneck in some | 663 | * value, and reading all cpu value can be performance bottleneck in some |
662 | * common workload, threashold and synchonization as vmstat[] should be | 664 | * common workload, threshold and synchronization as vmstat[] should be |
663 | * implemented. | 665 | * implemented. |
664 | */ | 666 | */ |
665 | static long mem_cgroup_read_stat(struct mem_cgroup *memcg, | 667 | static unsigned long |
666 | enum mem_cgroup_stat_index idx) | 668 | mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) |
667 | { | 669 | { |
668 | long val = 0; | 670 | long val = 0; |
669 | int cpu; | 671 | int cpu; |
670 | 672 | ||
673 | /* Per-cpu values can be negative, use a signed accumulator */ | ||
671 | for_each_possible_cpu(cpu) | 674 | for_each_possible_cpu(cpu) |
672 | val += per_cpu(memcg->stat->count[idx], cpu); | 675 | val += per_cpu(memcg->stat->count[idx], cpu); |
676 | /* | ||
677 | * Summing races with updates, so val may be negative. Avoid exposing | ||
678 | * transient negative values. | ||
679 | */ | ||
680 | if (val < 0) | ||
681 | val = 0; | ||
673 | return val; | 682 | return val; |
674 | } | 683 | } |
675 | 684 | ||
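
The mem_cgroup_read_stat() change keeps a signed accumulator but clamps the total before returning it as an unsigned count: per-cpu deltas race with updates, so an uncharge seen on one CPU before the matching charge on another can make the sum transiently negative. A small sketch of the same clamping pattern, with made-up per-cpu values:

	#include <stdio.h>

	#define NR_CPUS 4

	int main(void)
	{
		/* Individual per-cpu deltas may be negative while a charge/uncharge
		 * pair is only partially visible. */
		long percpu[NR_CPUS] = { 7, -3, -6, 1 };
		long val = 0;

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			val += percpu[cpu];

		/* Summing races with updates, so clamp transient negatives before
		 * reporting the value as an unsigned page count. */
		if (val < 0)
			val = 0;

		printf("reported pages: %lu\n", (unsigned long)val);
		return 0;
	}
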
@@ -1254,7 +1263,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | |||
1254 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { | 1263 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { |
1255 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) | 1264 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) |
1256 | continue; | 1265 | continue; |
1257 | pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], | 1266 | pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], |
1258 | K(mem_cgroup_read_stat(iter, i))); | 1267 | K(mem_cgroup_read_stat(iter, i))); |
1259 | } | 1268 | } |
1260 | 1269 | ||
@@ -2819,14 +2828,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg, | |||
2819 | enum mem_cgroup_stat_index idx) | 2828 | enum mem_cgroup_stat_index idx) |
2820 | { | 2829 | { |
2821 | struct mem_cgroup *iter; | 2830 | struct mem_cgroup *iter; |
2822 | long val = 0; | 2831 | unsigned long val = 0; |
2823 | 2832 | ||
2824 | /* Per-cpu values can be negative, use a signed accumulator */ | ||
2825 | for_each_mem_cgroup_tree(iter, memcg) | 2833 | for_each_mem_cgroup_tree(iter, memcg) |
2826 | val += mem_cgroup_read_stat(iter, idx); | 2834 | val += mem_cgroup_read_stat(iter, idx); |
2827 | 2835 | ||
2828 | if (val < 0) /* race ? */ | ||
2829 | val = 0; | ||
2830 | return val; | 2836 | return val; |
2831 | } | 2837 | } |
2832 | 2838 | ||
@@ -3169,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v) | |||
3169 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { | 3175 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { |
3170 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) | 3176 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) |
3171 | continue; | 3177 | continue; |
3172 | seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], | 3178 | seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], |
3173 | mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); | 3179 | mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); |
3174 | } | 3180 | } |
3175 | 3181 | ||
@@ -3194,13 +3200,13 @@ static int memcg_stat_show(struct seq_file *m, void *v) | |||
3194 | (u64)memsw * PAGE_SIZE); | 3200 | (u64)memsw * PAGE_SIZE); |
3195 | 3201 | ||
3196 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { | 3202 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { |
3197 | long long val = 0; | 3203 | unsigned long long val = 0; |
3198 | 3204 | ||
3199 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) | 3205 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) |
3200 | continue; | 3206 | continue; |
3201 | for_each_mem_cgroup_tree(mi, memcg) | 3207 | for_each_mem_cgroup_tree(mi, memcg) |
3202 | val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; | 3208 | val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; |
3203 | seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); | 3209 | seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); |
3204 | } | 3210 | } |
3205 | 3211 | ||
3206 | for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { | 3212 | for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { |
@@ -3381,6 +3387,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, | |||
3381 | ret = page_counter_memparse(args, "-1", &threshold); | 3387 | ret = page_counter_memparse(args, "-1", &threshold); |
3382 | if (ret) | 3388 | if (ret) |
3383 | return ret; | 3389 | return ret; |
3390 | threshold <<= PAGE_SHIFT; | ||
3384 | 3391 | ||
3385 | mutex_lock(&memcg->thresholds_lock); | 3392 | mutex_lock(&memcg->thresholds_lock); |
3386 | 3393 | ||
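
The threshold hunk adds `threshold <<= PAGE_SHIFT;` right after page_counter_memparse(): the parsed value is a page count, while the usage it is later compared against is in bytes, so the shift converts it back. A worked example of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT of 12 is an assumption here, not taken from the patch):

	#include <stdio.h>

	#define PAGE_SHIFT 12   /* assuming 4 KiB pages */

	int main(void)
	{
		unsigned long long bytes_in  = 4ULL << 20;              /* user writes "4M"       */
		unsigned long long pages     = bytes_in >> PAGE_SHIFT;  /* memparse -> 1024 pages */
		unsigned long long threshold = pages << PAGE_SHIFT;     /* patch: back to bytes   */

		printf("parsed %llu pages -> threshold %llu bytes\n", pages, threshold);
		return 0;
	}
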
@@ -4179,7 +4186,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void) | |||
4179 | if (memcg_wb_domain_init(memcg, GFP_KERNEL)) | 4186 | if (memcg_wb_domain_init(memcg, GFP_KERNEL)) |
4180 | goto out_free_stat; | 4187 | goto out_free_stat; |
4181 | 4188 | ||
4182 | spin_lock_init(&memcg->pcp_counter_lock); | ||
4183 | return memcg; | 4189 | return memcg; |
4184 | 4190 | ||
4185 | out_free_stat: | 4191 | out_free_stat: |
diff --git a/mm/memory.c b/mm/memory.c index 9cb27470fee9..deb679c31f2a 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2426,6 +2426,8 @@ void unmap_mapping_range(struct address_space *mapping, | |||
2426 | if (details.last_index < details.first_index) | 2426 | if (details.last_index < details.first_index) |
2427 | details.last_index = ULONG_MAX; | 2427 | details.last_index = ULONG_MAX; |
2428 | 2428 | ||
2429 | |||
2430 | /* DAX uses i_mmap_lock to serialise file truncate vs page fault */ | ||
2429 | i_mmap_lock_write(mapping); | 2431 | i_mmap_lock_write(mapping); |
2430 | if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) | 2432 | if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) |
2431 | unmap_mapping_range_tree(&mapping->i_mmap, &details); | 2433 | unmap_mapping_range_tree(&mapping->i_mmap, &details); |
diff --git a/mm/migrate.c b/mm/migrate.c index c3cb566af3e2..842ecd7aaf7f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -740,6 +740,15 @@ static int move_to_new_page(struct page *newpage, struct page *page, | |||
740 | if (PageSwapBacked(page)) | 740 | if (PageSwapBacked(page)) |
741 | SetPageSwapBacked(newpage); | 741 | SetPageSwapBacked(newpage); |
742 | 742 | ||
743 | /* | ||
744 | * Indirectly called below, migrate_page_copy() copies PG_dirty and thus | ||
745 | * needs newpage's memcg set to transfer memcg dirty page accounting. | ||
746 | * So perform memcg migration in two steps: | ||
747 | * 1. set newpage->mem_cgroup (here) | ||
748 | * 2. clear page->mem_cgroup (below) | ||
749 | */ | ||
750 | set_page_memcg(newpage, page_memcg(page)); | ||
751 | |||
743 | mapping = page_mapping(page); | 752 | mapping = page_mapping(page); |
744 | if (!mapping) | 753 | if (!mapping) |
745 | rc = migrate_page(mapping, newpage, page, mode); | 754 | rc = migrate_page(mapping, newpage, page, mode); |
@@ -756,9 +765,10 @@ static int move_to_new_page(struct page *newpage, struct page *page, | |||
756 | rc = fallback_migrate_page(mapping, newpage, page, mode); | 765 | rc = fallback_migrate_page(mapping, newpage, page, mode); |
757 | 766 | ||
758 | if (rc != MIGRATEPAGE_SUCCESS) { | 767 | if (rc != MIGRATEPAGE_SUCCESS) { |
768 | set_page_memcg(newpage, NULL); | ||
759 | newpage->mapping = NULL; | 769 | newpage->mapping = NULL; |
760 | } else { | 770 | } else { |
761 | mem_cgroup_migrate(page, newpage, false); | 771 | set_page_memcg(page, NULL); |
762 | if (page_was_mapped) | 772 | if (page_was_mapped) |
763 | remove_migration_ptes(page, newpage); | 773 | remove_migration_ptes(page, newpage); |
764 | page->mapping = NULL; | 774 | page->mapping = NULL; |
@@ -1075,7 +1085,7 @@ out: | |||
1075 | if (rc != MIGRATEPAGE_SUCCESS && put_new_page) | 1085 | if (rc != MIGRATEPAGE_SUCCESS && put_new_page) |
1076 | put_new_page(new_hpage, private); | 1086 | put_new_page(new_hpage, private); |
1077 | else | 1087 | else |
1078 | put_page(new_hpage); | 1088 | putback_active_hugepage(new_hpage); |
1079 | 1089 | ||
1080 | if (result) { | 1090 | if (result) { |
1081 | if (rc) | 1091 | if (rc) |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -612,8 +612,6 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm, | |||
612 | void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, | 612 | void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, |
613 | struct rb_node **rb_link, struct rb_node *rb_parent) | 613 | struct rb_node **rb_link, struct rb_node *rb_parent) |
614 | { | 614 | { |
615 | WARN_ONCE(vma->vm_file && !vma->vm_ops, "missing vma->vm_ops"); | ||
616 | |||
617 | /* Update tracking information for the gap following the new vma. */ | 615 | /* Update tracking information for the gap following the new vma. */ |
618 | if (vma->vm_next) | 616 | if (vma->vm_next) |
619 | vma_gap_update(vma->vm_next); | 617 | vma_gap_update(vma->vm_next); |
@@ -1492,13 +1490,14 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) | |||
1492 | int vma_wants_writenotify(struct vm_area_struct *vma) | 1490 | int vma_wants_writenotify(struct vm_area_struct *vma) |
1493 | { | 1491 | { |
1494 | vm_flags_t vm_flags = vma->vm_flags; | 1492 | vm_flags_t vm_flags = vma->vm_flags; |
1493 | const struct vm_operations_struct *vm_ops = vma->vm_ops; | ||
1495 | 1494 | ||
1496 | /* If it was private or non-writable, the write bit is already clear */ | 1495 | /* If it was private or non-writable, the write bit is already clear */ |
1497 | if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) | 1496 | if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) |
1498 | return 0; | 1497 | return 0; |
1499 | 1498 | ||
1500 | /* The backer wishes to know when pages are first written to? */ | 1499 | /* The backer wishes to know when pages are first written to? */ |
1501 | if (vma->vm_ops && vma->vm_ops->page_mkwrite) | 1500 | if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite)) |
1502 | return 1; | 1501 | return 1; |
1503 | 1502 | ||
1504 | /* The open routine did something to the protections that pgprot_modify | 1503 | /* The open routine did something to the protections that pgprot_modify |
@@ -1638,12 +1637,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr, | |||
1638 | */ | 1637 | */ |
1639 | WARN_ON_ONCE(addr != vma->vm_start); | 1638 | WARN_ON_ONCE(addr != vma->vm_start); |
1640 | 1639 | ||
1641 | /* All file mapping must have ->vm_ops set */ | ||
1642 | if (!vma->vm_ops) { | ||
1643 | static const struct vm_operations_struct dummy_ops = {}; | ||
1644 | vma->vm_ops = &dummy_ops; | ||
1645 | } | ||
1646 | |||
1647 | addr = vma->vm_start; | 1640 | addr = vma->vm_start; |
1648 | vm_flags = vma->vm_flags; | 1641 | vm_flags = vma->vm_flags; |
1649 | } else if (vm_flags & VM_SHARED) { | 1642 | } else if (vm_flags & VM_SHARED) { |
diff --git a/mm/readahead.c b/mm/readahead.c index 60cd846a9a44..24682f6f4cfd 100644 --- a/mm/readahead.c +++ b/mm/readahead.c | |||
@@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages, | |||
89 | while (!list_empty(pages)) { | 89 | while (!list_empty(pages)) { |
90 | page = list_to_page(pages); | 90 | page = list_to_page(pages); |
91 | list_del(&page->lru); | 91 | list_del(&page->lru); |
92 | if (add_to_page_cache_lru(page, mapping, | 92 | if (add_to_page_cache_lru(page, mapping, page->index, |
93 | page->index, GFP_KERNEL)) { | 93 | GFP_KERNEL & mapping_gfp_mask(mapping))) { |
94 | read_cache_pages_invalidate_page(mapping, page); | 94 | read_cache_pages_invalidate_page(mapping, page); |
95 | continue; | 95 | continue; |
96 | } | 96 | } |
@@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp, | |||
127 | for (page_idx = 0; page_idx < nr_pages; page_idx++) { | 127 | for (page_idx = 0; page_idx < nr_pages; page_idx++) { |
128 | struct page *page = list_to_page(pages); | 128 | struct page *page = list_to_page(pages); |
129 | list_del(&page->lru); | 129 | list_del(&page->lru); |
130 | if (!add_to_page_cache_lru(page, mapping, | 130 | if (!add_to_page_cache_lru(page, mapping, page->index, |
131 | page->index, GFP_KERNEL)) { | 131 | GFP_KERNEL & mapping_gfp_mask(mapping))) { |
132 | mapping->a_ops->readpage(filp, page); | 132 | mapping->a_ops->readpage(filp, page); |
133 | } | 133 | } |
134 | page_cache_release(page); | 134 | page_cache_release(page); |
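
The readahead.c hunks intersect GFP_KERNEL with the mapping's own gfp mask, so readahead page-cache allocations honour restrictions the filesystem placed on the mapping (for example, no filesystem re-entry). A tiny bitmask illustration; the flag values below are invented for the example and are not the kernel's gfp encoding:

	#include <stdio.h>

	#define TOY_GFP_IO   0x1u
	#define TOY_GFP_FS   0x2u
	#define TOY_GFP_WAIT 0x4u
	#define TOY_GFP_KERNEL (TOY_GFP_WAIT | TOY_GFP_IO | TOY_GFP_FS)
	#define TOY_GFP_NOFS   (TOY_GFP_WAIT | TOY_GFP_IO)

	int main(void)
	{
		unsigned int mapping_gfp = TOY_GFP_NOFS;  /* mapping forbids FS recursion */
		unsigned int gfp = TOY_GFP_KERNEL & mapping_gfp;

		/* Readahead allocations now drop any flag the mapping disallows. */
		printf("allocation flags: %#x (FS allowed: %s)\n",
		       gfp, (gfp & TOY_GFP_FS) ? "yes" : "no");
		return 0;
	}
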
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
2190 | size += BYTES_PER_WORD; | 2190 | size += BYTES_PER_WORD; |
2191 | } | 2191 | } |
2192 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) | 2192 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) |
2193 | if (size >= kmalloc_size(INDEX_NODE + 1) | 2193 | /* |
2194 | && cachep->object_size > cache_line_size() | 2194 | * To activate debug pagealloc, off-slab management is necessary |
2195 | && ALIGN(size, cachep->align) < PAGE_SIZE) { | 2195 | * requirement. In early phase of initialization, small sized slab |
2196 | * doesn't get initialized so it would not be possible. So, we need | ||
2197 | * to check size >= 256. It guarantees that all necessary small | ||
2198 | * sized slab is initialized in current slab initialization sequence. | ||
2199 | */ | ||
2200 | if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) && | ||
2201 | size >= 256 && cachep->object_size > cache_line_size() && | ||
2202 | ALIGN(size, cachep->align) < PAGE_SIZE) { | ||
2196 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); | 2203 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); |
2197 | size = PAGE_SIZE; | 2204 | size = PAGE_SIZE; |
2198 | } | 2205 | } |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 2d978b28a410..7f63a9381f71 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -175,7 +175,7 @@ static bool sane_reclaim(struct scan_control *sc) | |||
175 | if (!memcg) | 175 | if (!memcg) |
176 | return true; | 176 | return true; |
177 | #ifdef CONFIG_CGROUP_WRITEBACK | 177 | #ifdef CONFIG_CGROUP_WRITEBACK |
178 | if (memcg->css.cgroup) | 178 | if (cgroup_on_dfl(memcg->css.cgroup)) |
179 | return true; | 179 | return true; |
180 | #endif | 180 | #endif |
181 | return false; | 181 | return false; |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 4f5cd974e11a..fbf14485a049 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -1363,15 +1363,16 @@ static cpumask_var_t cpu_stat_off; | |||
1363 | 1363 | ||
1364 | static void vmstat_update(struct work_struct *w) | 1364 | static void vmstat_update(struct work_struct *w) |
1365 | { | 1365 | { |
1366 | if (refresh_cpu_vm_stats()) | 1366 | if (refresh_cpu_vm_stats()) { |
1367 | /* | 1367 | /* |
1368 | * Counters were updated so we expect more updates | 1368 | * Counters were updated so we expect more updates |
1369 | * to occur in the future. Keep on running the | 1369 | * to occur in the future. Keep on running the |
1370 | * update worker thread. | 1370 | * update worker thread. |
1371 | */ | 1371 | */ |
1372 | schedule_delayed_work(this_cpu_ptr(&vmstat_work), | 1372 | schedule_delayed_work_on(smp_processor_id(), |
1373 | this_cpu_ptr(&vmstat_work), | ||
1373 | round_jiffies_relative(sysctl_stat_interval)); | 1374 | round_jiffies_relative(sysctl_stat_interval)); |
1374 | else { | 1375 | } else { |
1375 | /* | 1376 | /* |
1376 | * We did not update any counters so the app may be in | 1377 | * We did not update any counters so the app may be in |
1377 | * a mode where it does not cause counter updates. | 1378 | * a mode where it does not cause counter updates. |
diff --git a/net/atm/clip.c b/net/atm/clip.c index 17e55dfecbe2..e07f551a863c 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c | |||
@@ -317,6 +317,9 @@ static int clip_constructor(struct neighbour *neigh) | |||
317 | 317 | ||
318 | static int clip_encap(struct atm_vcc *vcc, int mode) | 318 | static int clip_encap(struct atm_vcc *vcc, int mode) |
319 | { | 319 | { |
320 | if (!CLIP_VCC(vcc)) | ||
321 | return -EBADFD; | ||
322 | |||
320 | CLIP_VCC(vcc)->encap = mode; | 323 | CLIP_VCC(vcc)->encap = mode; |
321 | return 0; | 324 | return 0; |
322 | } | 325 | } |
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index ad82324f710f..0510a577a7b5 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -2311,12 +2311,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) | |||
2311 | if (!conn) | 2311 | if (!conn) |
2312 | return 1; | 2312 | return 1; |
2313 | 2313 | ||
2314 | chan = conn->smp; | ||
2315 | if (!chan) { | ||
2316 | BT_ERR("SMP security requested but not available"); | ||
2317 | return 1; | ||
2318 | } | ||
2319 | |||
2320 | if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) | 2314 | if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) |
2321 | return 1; | 2315 | return 1; |
2322 | 2316 | ||
@@ -2330,6 +2324,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) | |||
2330 | if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) | 2324 | if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) |
2331 | return 0; | 2325 | return 0; |
2332 | 2326 | ||
2327 | chan = conn->smp; | ||
2328 | if (!chan) { | ||
2329 | BT_ERR("SMP security requested but not available"); | ||
2330 | return 1; | ||
2331 | } | ||
2332 | |||
2333 | l2cap_chan_lock(chan); | 2333 | l2cap_chan_lock(chan); |
2334 | 2334 | ||
2335 | /* If SMP is already in progress ignore this request */ | 2335 | /* If SMP is already in progress ignore this request */ |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 66efdc21f548..480b3de1a0e3 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -1006,7 +1006,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, | |||
1006 | 1006 | ||
1007 | ih = igmpv3_report_hdr(skb); | 1007 | ih = igmpv3_report_hdr(skb); |
1008 | num = ntohs(ih->ngrec); | 1008 | num = ntohs(ih->ngrec); |
1009 | len = sizeof(*ih); | 1009 | len = skb_transport_offset(skb) + sizeof(*ih); |
1010 | 1010 | ||
1011 | for (i = 0; i < num; i++) { | 1011 | for (i = 0; i < num; i++) { |
1012 | len += sizeof(*grec); | 1012 | len += sizeof(*grec); |
@@ -1067,7 +1067,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, | |||
1067 | 1067 | ||
1068 | icmp6h = icmp6_hdr(skb); | 1068 | icmp6h = icmp6_hdr(skb); |
1069 | num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); | 1069 | num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); |
1070 | len = sizeof(*icmp6h); | 1070 | len = skb_transport_offset(skb) + sizeof(*icmp6h); |
1071 | 1071 | ||
1072 | for (i = 0; i < num; i++) { | 1072 | for (i = 0; i < num; i++) { |
1073 | __be16 *nsrcs, _nsrcs; | 1073 | __be16 *nsrcs, _nsrcs; |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 525f454f7531..b9b0e3b5da49 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -1353,11 +1353,12 @@ static void prepare_write_keepalive(struct ceph_connection *con) | |||
1353 | dout("prepare_write_keepalive %p\n", con); | 1353 | dout("prepare_write_keepalive %p\n", con); |
1354 | con_out_kvec_reset(con); | 1354 | con_out_kvec_reset(con); |
1355 | if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) { | 1355 | if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) { |
1356 | struct timespec ts = CURRENT_TIME; | 1356 | struct timespec now = CURRENT_TIME; |
1357 | struct ceph_timespec ceph_ts; | 1357 | |
1358 | ceph_encode_timespec(&ceph_ts, &ts); | ||
1359 | con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2); | 1358 | con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2); |
1360 | con_out_kvec_add(con, sizeof(ceph_ts), &ceph_ts); | 1359 | ceph_encode_timespec(&con->out_temp_keepalive2, &now); |
1360 | con_out_kvec_add(con, sizeof(con->out_temp_keepalive2), | ||
1361 | &con->out_temp_keepalive2); | ||
1361 | } else { | 1362 | } else { |
1362 | con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive); | 1363 | con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive); |
1363 | } | 1364 | } |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 80b94e37c94a..f79ccac6699f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -285,6 +285,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req, | |||
285 | switch (op->op) { | 285 | switch (op->op) { |
286 | case CEPH_OSD_OP_READ: | 286 | case CEPH_OSD_OP_READ: |
287 | case CEPH_OSD_OP_WRITE: | 287 | case CEPH_OSD_OP_WRITE: |
288 | case CEPH_OSD_OP_WRITEFULL: | ||
288 | ceph_osd_data_release(&op->extent.osd_data); | 289 | ceph_osd_data_release(&op->extent.osd_data); |
289 | break; | 290 | break; |
290 | case CEPH_OSD_OP_CALL: | 291 | case CEPH_OSD_OP_CALL: |
@@ -485,13 +486,14 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req, | |||
485 | size_t payload_len = 0; | 486 | size_t payload_len = 0; |
486 | 487 | ||
487 | BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && | 488 | BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && |
488 | opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE); | 489 | opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO && |
490 | opcode != CEPH_OSD_OP_TRUNCATE); | ||
489 | 491 | ||
490 | op->extent.offset = offset; | 492 | op->extent.offset = offset; |
491 | op->extent.length = length; | 493 | op->extent.length = length; |
492 | op->extent.truncate_size = truncate_size; | 494 | op->extent.truncate_size = truncate_size; |
493 | op->extent.truncate_seq = truncate_seq; | 495 | op->extent.truncate_seq = truncate_seq; |
494 | if (opcode == CEPH_OSD_OP_WRITE) | 496 | if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL) |
495 | payload_len += length; | 497 | payload_len += length; |
496 | 498 | ||
497 | op->payload_len = payload_len; | 499 | op->payload_len = payload_len; |
@@ -670,9 +672,11 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, | |||
670 | break; | 672 | break; |
671 | case CEPH_OSD_OP_READ: | 673 | case CEPH_OSD_OP_READ: |
672 | case CEPH_OSD_OP_WRITE: | 674 | case CEPH_OSD_OP_WRITE: |
675 | case CEPH_OSD_OP_WRITEFULL: | ||
673 | case CEPH_OSD_OP_ZERO: | 676 | case CEPH_OSD_OP_ZERO: |
674 | case CEPH_OSD_OP_TRUNCATE: | 677 | case CEPH_OSD_OP_TRUNCATE: |
675 | if (src->op == CEPH_OSD_OP_WRITE) | 678 | if (src->op == CEPH_OSD_OP_WRITE || |
679 | src->op == CEPH_OSD_OP_WRITEFULL) | ||
676 | request_data_len = src->extent.length; | 680 | request_data_len = src->extent.length; |
677 | dst->extent.offset = cpu_to_le64(src->extent.offset); | 681 | dst->extent.offset = cpu_to_le64(src->extent.offset); |
678 | dst->extent.length = cpu_to_le64(src->extent.length); | 682 | dst->extent.length = cpu_to_le64(src->extent.length); |
@@ -681,7 +685,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, | |||
681 | dst->extent.truncate_seq = | 685 | dst->extent.truncate_seq = |
682 | cpu_to_le32(src->extent.truncate_seq); | 686 | cpu_to_le32(src->extent.truncate_seq); |
683 | osd_data = &src->extent.osd_data; | 687 | osd_data = &src->extent.osd_data; |
684 | if (src->op == CEPH_OSD_OP_WRITE) | 688 | if (src->op == CEPH_OSD_OP_WRITE || |
689 | src->op == CEPH_OSD_OP_WRITEFULL) | ||
685 | ceph_osdc_msg_data_add(req->r_request, osd_data); | 690 | ceph_osdc_msg_data_add(req->r_request, osd_data); |
686 | else | 691 | else |
687 | ceph_osdc_msg_data_add(req->r_reply, osd_data); | 692 | ceph_osdc_msg_data_add(req->r_reply, osd_data); |
diff --git a/net/core/dev.c b/net/core/dev.c index 877c84834d81..6bb6470f5b7b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -4713,6 +4713,8 @@ void napi_disable(struct napi_struct *n) | |||
4713 | 4713 | ||
4714 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) | 4714 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) |
4715 | msleep(1); | 4715 | msleep(1); |
4716 | while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) | ||
4717 | msleep(1); | ||
4716 | 4718 | ||
4717 | hrtimer_cancel(&n->timer); | 4719 | hrtimer_cancel(&n->timer); |
4718 | 4720 | ||
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index bf77e3639ce0..365de66436ac 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -631,15 +631,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, | |||
631 | { | 631 | { |
632 | int idx = 0; | 632 | int idx = 0; |
633 | struct fib_rule *rule; | 633 | struct fib_rule *rule; |
634 | int err = 0; | ||
634 | 635 | ||
635 | rcu_read_lock(); | 636 | rcu_read_lock(); |
636 | list_for_each_entry_rcu(rule, &ops->rules_list, list) { | 637 | list_for_each_entry_rcu(rule, &ops->rules_list, list) { |
637 | if (idx < cb->args[1]) | 638 | if (idx < cb->args[1]) |
638 | goto skip; | 639 | goto skip; |
639 | 640 | ||
640 | if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, | 641 | err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, |
641 | cb->nlh->nlmsg_seq, RTM_NEWRULE, | 642 | cb->nlh->nlmsg_seq, RTM_NEWRULE, |
642 | NLM_F_MULTI, ops) < 0) | 643 | NLM_F_MULTI, ops); |
644 | if (err) | ||
643 | break; | 645 | break; |
644 | skip: | 646 | skip: |
645 | idx++; | 647 | idx++; |
@@ -648,7 +650,7 @@ skip: | |||
648 | cb->args[1] = idx; | 650 | cb->args[1] = idx; |
649 | rules_ops_put(ops); | 651 | rules_ops_put(ops); |
650 | 652 | ||
651 | return skb->len; | 653 | return err; |
652 | } | 654 | } |
653 | 655 | ||
654 | static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) | 656 | static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) |
@@ -664,7 +666,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) | |||
664 | if (ops == NULL) | 666 | if (ops == NULL) |
665 | return -EAFNOSUPPORT; | 667 | return -EAFNOSUPPORT; |
666 | 668 | ||
667 | return dump_rules(skb, cb, ops); | 669 | dump_rules(skb, cb, ops); |
670 | |||
671 | return skb->len; | ||
668 | } | 672 | } |
669 | 673 | ||
670 | rcu_read_lock(); | 674 | rcu_read_lock(); |
diff --git a/net/core/filter.c b/net/core/filter.c index 13079f03902e..05a04ea87172 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -478,9 +478,9 @@ do_pass: | |||
478 | bpf_src = BPF_X; | 478 | bpf_src = BPF_X; |
479 | } else { | 479 | } else { |
480 | insn->dst_reg = BPF_REG_A; | 480 | insn->dst_reg = BPF_REG_A; |
481 | insn->src_reg = BPF_REG_X; | ||
482 | insn->imm = fp->k; | 481 | insn->imm = fp->k; |
483 | bpf_src = BPF_SRC(fp->code); | 482 | bpf_src = BPF_SRC(fp->code); |
483 | insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0; | ||
484 | } | 484 | } |
485 | 485 | ||
486 | /* Common case where 'jump_false' is next insn. */ | 486 | /* Common case where 'jump_false' is next insn. */ |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index b279077c3089..830f8a7c1cb1 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -31,7 +31,6 @@ | |||
31 | static const char fmt_hex[] = "%#x\n"; | 31 | static const char fmt_hex[] = "%#x\n"; |
32 | static const char fmt_long_hex[] = "%#lx\n"; | 32 | static const char fmt_long_hex[] = "%#lx\n"; |
33 | static const char fmt_dec[] = "%d\n"; | 33 | static const char fmt_dec[] = "%d\n"; |
34 | static const char fmt_udec[] = "%u\n"; | ||
35 | static const char fmt_ulong[] = "%lu\n"; | 34 | static const char fmt_ulong[] = "%lu\n"; |
36 | static const char fmt_u64[] = "%llu\n"; | 35 | static const char fmt_u64[] = "%llu\n"; |
37 | 36 | ||
@@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev, | |||
202 | if (netif_running(netdev)) { | 201 | if (netif_running(netdev)) { |
203 | struct ethtool_cmd cmd; | 202 | struct ethtool_cmd cmd; |
204 | if (!__ethtool_get_settings(netdev, &cmd)) | 203 | if (!__ethtool_get_settings(netdev, &cmd)) |
205 | ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); | 204 | ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd)); |
206 | } | 205 | } |
207 | rtnl_unlock(); | 206 | rtnl_unlock(); |
208 | return ret; | 207 | return ret; |
@@ -1481,6 +1480,15 @@ static int of_dev_node_match(struct device *dev, const void *data) | |||
1481 | return ret == 0 ? dev->of_node == data : ret; | 1480 | return ret == 0 ? dev->of_node == data : ret; |
1482 | } | 1481 | } |
1483 | 1482 | ||
1483 | /* | ||
1484 | * of_find_net_device_by_node - lookup the net device for the device node | ||
1485 | * @np: OF device node | ||
1486 | * | ||
1487 | * Looks up the net_device structure corresponding with the device node. | ||
1488 | * If successful, returns a pointer to the net_device with the embedded | ||
1489 | * struct device refcount incremented by one, or NULL on failure. The | ||
1490 | * refcount must be dropped when done with the net_device. | ||
1491 | */ | ||
1484 | struct net_device *of_find_net_device_by_node(struct device_node *np) | 1492 | struct net_device *of_find_net_device_by_node(struct device_node *np) |
1485 | { | 1493 | { |
1486 | struct device *dev; | 1494 | struct device *dev; |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 6aa3db8dfc3b..8bdada242a7d 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -142,7 +142,7 @@ static void queue_process(struct work_struct *work) | |||
142 | */ | 142 | */ |
143 | static int poll_one_napi(struct napi_struct *napi, int budget) | 143 | static int poll_one_napi(struct napi_struct *napi, int budget) |
144 | { | 144 | { |
145 | int work; | 145 | int work = 0; |
146 | 146 | ||
147 | /* net_rx_action's ->poll() invocations and our's are | 147 | /* net_rx_action's ->poll() invocations and our's are |
148 | * synchronized by this test which is only made while | 148 | * synchronized by this test which is only made while |
@@ -151,7 +151,12 @@ static int poll_one_napi(struct napi_struct *napi, int budget) | |||
151 | if (!test_bit(NAPI_STATE_SCHED, &napi->state)) | 151 | if (!test_bit(NAPI_STATE_SCHED, &napi->state)) |
152 | return budget; | 152 | return budget; |
153 | 153 | ||
154 | set_bit(NAPI_STATE_NPSVC, &napi->state); | 154 | /* If we set this bit but see that it has already been set, |
155 | * that indicates that napi has been disabled and we need | ||
156 | * to abort this operation | ||
157 | */ | ||
158 | if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state)) | ||
159 | goto out; | ||
155 | 160 | ||
156 | work = napi->poll(napi, budget); | 161 | work = napi->poll(napi, budget); |
157 | WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll); | 162 | WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll); |
@@ -159,6 +164,7 @@ static int poll_one_napi(struct napi_struct *napi, int budget) | |||
159 | 164 | ||
160 | clear_bit(NAPI_STATE_NPSVC, &napi->state); | 165 | clear_bit(NAPI_STATE_NPSVC, &napi->state); |
161 | 166 | ||
167 | out: | ||
162 | return budget - work; | 168 | return budget - work; |
163 | } | 169 | } |
164 | 170 | ||
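
Taken together with the net/core/dev.c hunk earlier (napi_disable() now also spins on NAPI_STATE_NPSVC), the netpoll change turns the NPSVC bit into a small handshake: the disable path takes the bit and keeps it, and a later netpoll service attempt that finds it already set simply backs off. A toy model of that handshake using a C11 atomic flag; the function names are descriptive only, not kernel APIs:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_flag npsvc = ATOMIC_FLAG_INIT;

	static void napi_disable_path(void)
	{
		while (atomic_flag_test_and_set(&npsvc))
			;                    /* wait out any in-flight netpoll service */
		/* bit stays set: NAPI is now disabled */
	}

	static bool poll_one_napi_path(void)
	{
		if (atomic_flag_test_and_set(&npsvc))
			return false;        /* already set => disabled, abort quietly */
		/* ... napi->poll() would run here ... */
		atomic_flag_clear(&npsvc);
		return true;
	}

	int main(void)
	{
		printf("service before disable: %s\n", poll_one_napi_path() ? "polled" : "skipped");
		napi_disable_path();
		printf("service after  disable: %s\n", poll_one_napi_path() ? "polled" : "skipped");
		return 0;
	}
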
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a466821d1441..0ec48403ed68 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -3047,6 +3047,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) | |||
3047 | u32 portid = NETLINK_CB(cb->skb).portid; | 3047 | u32 portid = NETLINK_CB(cb->skb).portid; |
3048 | u32 seq = cb->nlh->nlmsg_seq; | 3048 | u32 seq = cb->nlh->nlmsg_seq; |
3049 | u32 filter_mask = 0; | 3049 | u32 filter_mask = 0; |
3050 | int err; | ||
3050 | 3051 | ||
3051 | if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) { | 3052 | if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) { |
3052 | struct nlattr *extfilt; | 3053 | struct nlattr *extfilt; |
@@ -3067,20 +3068,25 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) | |||
3067 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); | 3068 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); |
3068 | 3069 | ||
3069 | if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { | 3070 | if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { |
3070 | if (idx >= cb->args[0] && | 3071 | if (idx >= cb->args[0]) { |
3071 | br_dev->netdev_ops->ndo_bridge_getlink( | 3072 | err = br_dev->netdev_ops->ndo_bridge_getlink( |
3072 | skb, portid, seq, dev, filter_mask, | 3073 | skb, portid, seq, dev, |
3073 | NLM_F_MULTI) < 0) | 3074 | filter_mask, NLM_F_MULTI); |
3074 | break; | 3075 | if (err < 0 && err != -EOPNOTSUPP) |
3076 | break; | ||
3077 | } | ||
3075 | idx++; | 3078 | idx++; |
3076 | } | 3079 | } |
3077 | 3080 | ||
3078 | if (ops->ndo_bridge_getlink) { | 3081 | if (ops->ndo_bridge_getlink) { |
3079 | if (idx >= cb->args[0] && | 3082 | if (idx >= cb->args[0]) { |
3080 | ops->ndo_bridge_getlink(skb, portid, seq, dev, | 3083 | err = ops->ndo_bridge_getlink(skb, portid, |
3081 | filter_mask, | 3084 | seq, dev, |
3082 | NLM_F_MULTI) < 0) | 3085 | filter_mask, |
3083 | break; | 3086 | NLM_F_MULTI); |
3087 | if (err < 0 && err != -EOPNOTSUPP) | ||
3088 | break; | ||
3089 | } | ||
3084 | idx++; | 3090 | idx++; |
3085 | } | 3091 | } |
3086 | } | 3092 | } |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index dad4dd37e2aa..fab4599ba8b2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags); | |||
2958 | */ | 2958 | */ |
2959 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) | 2959 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) |
2960 | { | 2960 | { |
2961 | unsigned char *data = skb->data; | ||
2962 | |||
2961 | BUG_ON(len > skb->len); | 2963 | BUG_ON(len > skb->len); |
2962 | skb->len -= len; | 2964 | __skb_pull(skb, len); |
2963 | BUG_ON(skb->len < skb->data_len); | 2965 | skb_postpull_rcsum(skb, data, len); |
2964 | skb_postpull_rcsum(skb, skb->data, len); | 2966 | return skb->data; |
2965 | return skb->data += len; | ||
2966 | } | 2967 | } |
2967 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); | 2968 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); |
2968 | 2969 | ||
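
The skb_pull_rcsum() rewrite records the old data pointer before the pull, then lets skb_postpull_rcsum() subtract the pulled bytes from the running checksum. A toy buffer model of that ordering; the byte-sum here is deliberately simplified and is not the kernel's ones-complement checksum math:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct toy_skb {
		uint8_t  buf[16];
		uint8_t *data;
		unsigned int len;
		unsigned int csum;      /* sum of bytes currently covered by data/len */
	};

	static unsigned int byte_sum(const uint8_t *p, unsigned int n)
	{
		unsigned int s = 0;
		while (n--)
			s += *p++;
		return s;
	}

	static uint8_t *toy_pull_rcsum(struct toy_skb *skb, unsigned int len)
	{
		uint8_t *old = skb->data;        /* remember where the pulled bytes were */

		skb->data += len;                /* __skb_pull()                          */
		skb->len  -= len;
		skb->csum -= byte_sum(old, len); /* skb_postpull_rcsum() over the old data */
		return skb->data;
	}

	int main(void)
	{
		struct toy_skb skb = { .len = 8 };

		skb.data = skb.buf;
		memcpy(skb.buf, "\x01\x02\x03\x04\x05\x06\x07\x08", 8);
		skb.csum = byte_sum(skb.buf, 8);
		toy_pull_rcsum(&skb, 4);
		printf("len=%u csum=%u (expect 5+6+7+8=26)\n", skb.len, skb.csum);
		return 0;
	}
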
diff --git a/net/core/sock.c b/net/core/sock.c index ca2984afe16e..3307c02244d3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -2740,10 +2740,8 @@ static void req_prot_cleanup(struct request_sock_ops *rsk_prot) | |||
2740 | return; | 2740 | return; |
2741 | kfree(rsk_prot->slab_name); | 2741 | kfree(rsk_prot->slab_name); |
2742 | rsk_prot->slab_name = NULL; | 2742 | rsk_prot->slab_name = NULL; |
2743 | if (rsk_prot->slab) { | 2743 | kmem_cache_destroy(rsk_prot->slab); |
2744 | kmem_cache_destroy(rsk_prot->slab); | 2744 | rsk_prot->slab = NULL; |
2745 | rsk_prot->slab = NULL; | ||
2746 | } | ||
2747 | } | 2745 | } |
2748 | 2746 | ||
2749 | static int req_prot_init(const struct proto *prot) | 2747 | static int req_prot_init(const struct proto *prot) |
@@ -2828,10 +2826,8 @@ void proto_unregister(struct proto *prot) | |||
2828 | list_del(&prot->node); | 2826 | list_del(&prot->node); |
2829 | mutex_unlock(&proto_list_mutex); | 2827 | mutex_unlock(&proto_list_mutex); |
2830 | 2828 | ||
2831 | if (prot->slab != NULL) { | 2829 | kmem_cache_destroy(prot->slab); |
2832 | kmem_cache_destroy(prot->slab); | 2830 | prot->slab = NULL; |
2833 | prot->slab = NULL; | ||
2834 | } | ||
2835 | 2831 | ||
2836 | req_prot_cleanup(prot->rsk_prot); | 2832 | req_prot_cleanup(prot->rsk_prot); |
2837 | 2833 | ||
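
The sock.c hunks (and the dccp ones that follow) drop the `if (slab != NULL)` guards because kmem_cache_destroy() already tolerates a NULL argument, matching the kfree() convention. A small sketch of why writing the destroy function NULL-tolerant lets every call site shrink; toy_cache_destroy below is an illustrative stand-in, not a kernel function:

	#include <stdio.h>
	#include <stdlib.h>

	struct toy_cache { const char *name; };

	/* NULL-tolerant destroy, mirroring kmem_cache_destroy()/kfree():
	 * the check lives here once instead of at every caller. */
	static void toy_cache_destroy(struct toy_cache *c)
	{
		if (!c)
			return;
		printf("destroying cache %s\n", c->name);
		free(c);
	}

	int main(void)
	{
		struct toy_cache *c = malloc(sizeof(*c));

		if (c)
			c->name = "toy_slab";
		toy_cache_destroy(c);      /* normal teardown                    */
		toy_cache_destroy(NULL);   /* safe no-op, no guard at the caller */
		return 0;
	}
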
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index bd9e718c2a20..3de0d0362d7f 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c | |||
@@ -398,12 +398,8 @@ out_err: | |||
398 | 398 | ||
399 | void dccp_ackvec_exit(void) | 399 | void dccp_ackvec_exit(void) |
400 | { | 400 | { |
401 | if (dccp_ackvec_slab != NULL) { | 401 | kmem_cache_destroy(dccp_ackvec_slab); |
402 | kmem_cache_destroy(dccp_ackvec_slab); | 402 | dccp_ackvec_slab = NULL; |
403 | dccp_ackvec_slab = NULL; | 403 | kmem_cache_destroy(dccp_ackvec_record_slab); |
404 | } | 404 | dccp_ackvec_record_slab = NULL; |
405 | if (dccp_ackvec_record_slab != NULL) { | ||
406 | kmem_cache_destroy(dccp_ackvec_record_slab); | ||
407 | dccp_ackvec_record_slab = NULL; | ||
408 | } | ||
409 | } | 405 | } |
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c index 83498975165f..90f77d08cc37 100644 --- a/net/dccp/ccid.c +++ b/net/dccp/ccid.c | |||
@@ -95,8 +95,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f | |||
95 | 95 | ||
96 | static void ccid_kmem_cache_destroy(struct kmem_cache *slab) | 96 | static void ccid_kmem_cache_destroy(struct kmem_cache *slab) |
97 | { | 97 | { |
98 | if (slab != NULL) | 98 | kmem_cache_destroy(slab); |
99 | kmem_cache_destroy(slab); | ||
100 | } | 99 | } |
101 | 100 | ||
102 | static int __init ccid_activate(struct ccid_operations *ccid_ops) | 101 | static int __init ccid_activate(struct ccid_operations *ccid_ops) |
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 30addee2dd03..838f524cf11a 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c | |||
@@ -48,8 +48,6 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) | |||
48 | tw->tw_ipv6only = sk->sk_ipv6only; | 48 | tw->tw_ipv6only = sk->sk_ipv6only; |
49 | } | 49 | } |
50 | #endif | 50 | #endif |
51 | /* Linkage updates. */ | ||
52 | __inet_twsk_hashdance(tw, sk, &dccp_hashinfo); | ||
53 | 51 | ||
54 | /* Get the TIME_WAIT timeout firing. */ | 52 | /* Get the TIME_WAIT timeout firing. */ |
55 | if (timeo < rto) | 53 | if (timeo < rto) |
@@ -60,6 +58,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) | |||
60 | timeo = DCCP_TIMEWAIT_LEN; | 58 | timeo = DCCP_TIMEWAIT_LEN; |
61 | 59 | ||
62 | inet_twsk_schedule(tw, timeo); | 60 | inet_twsk_schedule(tw, timeo); |
61 | /* Linkage updates. */ | ||
62 | __inet_twsk_hashdance(tw, sk, &dccp_hashinfo); | ||
63 | inet_twsk_put(tw); | 63 | inet_twsk_put(tw); |
64 | } else { | 64 | } else { |
65 | /* Sorry, if we're out of memory, just CLOSE this | 65 | /* Sorry, if we're out of memory, just CLOSE this |
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 76e3800765f8..c59fa5d9c22c 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -634,6 +634,10 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd) | |||
634 | port_index++; | 634 | port_index++; |
635 | } | 635 | } |
636 | kfree(pd->chip[i].rtable); | 636 | kfree(pd->chip[i].rtable); |
637 | |||
638 | /* Drop our reference to the MDIO bus device */ | ||
639 | if (pd->chip[i].host_dev) | ||
640 | put_device(pd->chip[i].host_dev); | ||
637 | } | 641 | } |
638 | kfree(pd->chip); | 642 | kfree(pd->chip); |
639 | } | 643 | } |
@@ -661,16 +665,22 @@ static int dsa_of_probe(struct device *dev) | |||
661 | return -EPROBE_DEFER; | 665 | return -EPROBE_DEFER; |
662 | 666 | ||
663 | ethernet = of_parse_phandle(np, "dsa,ethernet", 0); | 667 | ethernet = of_parse_phandle(np, "dsa,ethernet", 0); |
664 | if (!ethernet) | 668 | if (!ethernet) { |
665 | return -EINVAL; | 669 | ret = -EINVAL; |
670 | goto out_put_mdio; | ||
671 | } | ||
666 | 672 | ||
667 | ethernet_dev = of_find_net_device_by_node(ethernet); | 673 | ethernet_dev = of_find_net_device_by_node(ethernet); |
668 | if (!ethernet_dev) | 674 | if (!ethernet_dev) { |
669 | return -EPROBE_DEFER; | 675 | ret = -EPROBE_DEFER; |
676 | goto out_put_mdio; | ||
677 | } | ||
670 | 678 | ||
671 | pd = kzalloc(sizeof(*pd), GFP_KERNEL); | 679 | pd = kzalloc(sizeof(*pd), GFP_KERNEL); |
672 | if (!pd) | 680 | if (!pd) { |
673 | return -ENOMEM; | 681 | ret = -ENOMEM; |
682 | goto out_put_ethernet; | ||
683 | } | ||
674 | 684 | ||
675 | dev->platform_data = pd; | 685 | dev->platform_data = pd; |
676 | pd->of_netdev = ethernet_dev; | 686 | pd->of_netdev = ethernet_dev; |
@@ -691,7 +701,9 @@ static int dsa_of_probe(struct device *dev) | |||
691 | cd = &pd->chip[chip_index]; | 701 | cd = &pd->chip[chip_index]; |
692 | 702 | ||
693 | cd->of_node = child; | 703 | cd->of_node = child; |
694 | cd->host_dev = &mdio_bus->dev; | 704 | |
705 | /* When assigning the host device, increment its refcount */ | ||
706 | cd->host_dev = get_device(&mdio_bus->dev); | ||
695 | 707 | ||
696 | sw_addr = of_get_property(child, "reg", NULL); | 708 | sw_addr = of_get_property(child, "reg", NULL); |
697 | if (!sw_addr) | 709 | if (!sw_addr) |
@@ -711,6 +723,12 @@ static int dsa_of_probe(struct device *dev) | |||
711 | ret = -EPROBE_DEFER; | 723 | ret = -EPROBE_DEFER; |
712 | goto out_free_chip; | 724 | goto out_free_chip; |
713 | } | 725 | } |
726 | |||
727 | /* Drop the mdio_bus device ref, replacing the host | ||
728 | * device with the mdio_bus_switch device, keeping | ||
729 | * the refcount from of_mdio_find_bus() above. | ||
730 | */ | ||
731 | put_device(cd->host_dev); | ||
714 | cd->host_dev = &mdio_bus_switch->dev; | 732 | cd->host_dev = &mdio_bus_switch->dev; |
715 | } | 733 | } |
716 | 734 | ||
@@ -744,6 +762,10 @@ static int dsa_of_probe(struct device *dev) | |||
744 | } | 762 | } |
745 | } | 763 | } |
746 | 764 | ||
765 | /* The individual chips hold their own refcount on the mdio bus, | ||
766 | * so drop ours */ | ||
767 | put_device(&mdio_bus->dev); | ||
768 | |||
747 | return 0; | 769 | return 0; |
748 | 770 | ||
749 | out_free_chip: | 771 | out_free_chip: |
@@ -751,6 +773,10 @@ out_free_chip: | |||
751 | out_free: | 773 | out_free: |
752 | kfree(pd); | 774 | kfree(pd); |
753 | dev->platform_data = NULL; | 775 | dev->platform_data = NULL; |
776 | out_put_ethernet: | ||
777 | put_device(&ethernet_dev->dev); | ||
778 | out_put_mdio: | ||
779 | put_device(&mdio_bus->dev); | ||
754 | return ret; | 780 | return ret; |
755 | } | 781 | } |
756 | 782 | ||
@@ -762,6 +788,7 @@ static void dsa_of_remove(struct device *dev) | |||
762 | return; | 788 | return; |
763 | 789 | ||
764 | dsa_of_free_platform_data(pd); | 790 | dsa_of_free_platform_data(pd); |
791 | put_device(&pd->of_netdev->dev); | ||
765 | kfree(pd); | 792 | kfree(pd); |
766 | } | 793 | } |
767 | #else | 794 | #else |
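
The dsa.c hunks above pair every get_device() with a put_device() on each exit path, and the error paths unwind through goto labels in reverse order of acquisition. A compressed userspace sketch of that discipline (hypothetical types and names, not the DSA code):

#include <stdio.h>
#include <errno.h>

struct dev { int refs; };

static struct dev *get_dev(struct dev *d) { d->refs++; return d; }
static void put_dev(struct dev *d) { d->refs--; }

static int probe(struct dev *mdio, struct dev *ether, int fail_at)
{
	int err;

	get_dev(mdio);				/* ref taken by the lookup */
	if (fail_at == 1) {
		err = -EINVAL;
		goto out_put_mdio;
	}

	get_dev(ether);
	if (fail_at == 2) {
		err = -ENOMEM;
		goto out_put_ethernet;
	}

	put_dev(mdio);				/* success: children hold their own refs */
	return 0;

out_put_ethernet:
	put_dev(ether);
out_put_mdio:
	put_dev(mdio);
	return err;
}

int main(void)
{
	struct dev mdio = { 0 }, ether = { 0 };

	probe(&mdio, &ether, 2);
	printf("mdio refs=%d, ethernet refs=%d (both back to 0)\n",
	       mdio.refs, ether.refs);
	return 0;
}
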
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index cce97385f743..7d91f4612ac0 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state) | |||
458 | static int dsa_slave_port_attr_set(struct net_device *dev, | 458 | static int dsa_slave_port_attr_set(struct net_device *dev, |
459 | struct switchdev_attr *attr) | 459 | struct switchdev_attr *attr) |
460 | { | 460 | { |
461 | int ret = 0; | 461 | struct dsa_slave_priv *p = netdev_priv(dev); |
462 | struct dsa_switch *ds = p->parent; | ||
463 | int ret; | ||
462 | 464 | ||
463 | switch (attr->id) { | 465 | switch (attr->id) { |
464 | case SWITCHDEV_ATTR_PORT_STP_STATE: | 466 | case SWITCHDEV_ATTR_PORT_STP_STATE: |
465 | if (attr->trans == SWITCHDEV_TRANS_COMMIT) | 467 | if (attr->trans == SWITCHDEV_TRANS_PREPARE) |
466 | ret = dsa_slave_stp_update(dev, attr->u.stp_state); | 468 | ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP; |
469 | else | ||
470 | ret = ds->drv->port_stp_update(ds, p->port, | ||
471 | attr->u.stp_state); | ||
467 | break; | 472 | break; |
468 | default: | 473 | default: |
469 | ret = -EOPNOTSUPP; | 474 | ret = -EOPNOTSUPP; |
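
The slave.c hunk above splits the STP attribute into the switchdev prepare and commit phases: prepare only reports whether the driver implements port_stp_update, and commit performs the call. A small userspace sketch of that two-phase shape (illustrative names, not the switchdev API):

#include <stdio.h>
#include <errno.h>

enum trans { TRANS_PREPARE, TRANS_COMMIT };

struct driver { int (*set_stp_state)(int port, int state); };

static int apply_stp_state(const struct driver *drv, enum trans phase,
			   int port, int state)
{
	if (phase == TRANS_PREPARE)
		return drv->set_stp_state ? 0 : -EOPNOTSUPP;	/* can we? */
	return drv->set_stp_state(port, state);			/* do it  */
}

static int set_stp_state(int port, int state)
{
	printf("port %d -> STP state %d\n", port, state);
	return 0;
}

int main(void)
{
	struct driver drv = { .set_stp_state = set_stp_state };

	if (apply_stp_state(&drv, TRANS_PREPARE, 1, 3) == 0)
		apply_stp_state(&drv, TRANS_COMMIT, 1, 3);
	return 0;
}
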
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index d25efc93d8f1..b6ca0890d018 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c | |||
@@ -78,7 +78,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev, | |||
78 | 78 | ||
79 | trailer = skb_tail_pointer(skb) - 4; | 79 | trailer = skb_tail_pointer(skb) - 4; |
80 | if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 || | 80 | if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 || |
81 | (trailer[3] & 0xef) != 0x00 || trailer[3] != 0x00) | 81 | (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00) |
82 | goto out_drop; | 82 | goto out_drop; |
83 | 83 | ||
84 | source_port = trailer[1] & 7; | 84 | source_port = trailer[1] & 7; |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 30409b75e925..f03db8b7abee 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -113,6 +113,8 @@ | |||
113 | #include <net/arp.h> | 113 | #include <net/arp.h> |
114 | #include <net/ax25.h> | 114 | #include <net/ax25.h> |
115 | #include <net/netrom.h> | 115 | #include <net/netrom.h> |
116 | #include <net/dst_metadata.h> | ||
117 | #include <net/ip_tunnels.h> | ||
116 | 118 | ||
117 | #include <linux/uaccess.h> | 119 | #include <linux/uaccess.h> |
118 | 120 | ||
@@ -296,7 +298,8 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip, | |||
296 | struct net_device *dev, __be32 src_ip, | 298 | struct net_device *dev, __be32 src_ip, |
297 | const unsigned char *dest_hw, | 299 | const unsigned char *dest_hw, |
298 | const unsigned char *src_hw, | 300 | const unsigned char *src_hw, |
299 | const unsigned char *target_hw, struct sk_buff *oskb) | 301 | const unsigned char *target_hw, |
302 | struct dst_entry *dst) | ||
300 | { | 303 | { |
301 | struct sk_buff *skb; | 304 | struct sk_buff *skb; |
302 | 305 | ||
@@ -309,9 +312,7 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip, | |||
309 | if (!skb) | 312 | if (!skb) |
310 | return; | 313 | return; |
311 | 314 | ||
312 | if (oskb) | 315 | skb_dst_set(skb, dst); |
313 | skb_dst_copy(skb, oskb); | ||
314 | |||
315 | arp_xmit(skb); | 316 | arp_xmit(skb); |
316 | } | 317 | } |
317 | 318 | ||
@@ -333,6 +334,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) | |||
333 | __be32 target = *(__be32 *)neigh->primary_key; | 334 | __be32 target = *(__be32 *)neigh->primary_key; |
334 | int probes = atomic_read(&neigh->probes); | 335 | int probes = atomic_read(&neigh->probes); |
335 | struct in_device *in_dev; | 336 | struct in_device *in_dev; |
337 | struct dst_entry *dst = NULL; | ||
336 | 338 | ||
337 | rcu_read_lock(); | 339 | rcu_read_lock(); |
338 | in_dev = __in_dev_get_rcu(dev); | 340 | in_dev = __in_dev_get_rcu(dev); |
@@ -381,9 +383,10 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) | |||
381 | } | 383 | } |
382 | } | 384 | } |
383 | 385 | ||
386 | if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE)) | ||
387 | dst = dst_clone(skb_dst(skb)); | ||
384 | arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, | 388 | arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, |
385 | dst_hw, dev->dev_addr, NULL, | 389 | dst_hw, dev->dev_addr, NULL, dst); |
386 | dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb); | ||
387 | } | 390 | } |
388 | 391 | ||
389 | static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip) | 392 | static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip) |
@@ -649,6 +652,7 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) | |||
649 | int addr_type; | 652 | int addr_type; |
650 | struct neighbour *n; | 653 | struct neighbour *n; |
651 | struct net *net = dev_net(dev); | 654 | struct net *net = dev_net(dev); |
655 | struct dst_entry *reply_dst = NULL; | ||
652 | bool is_garp = false; | 656 | bool is_garp = false; |
653 | 657 | ||
654 | /* arp_rcv below verifies the ARP header and verifies the device | 658 | /* arp_rcv below verifies the ARP header and verifies the device |
@@ -749,13 +753,18 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) | |||
749 | * cache. | 753 | * cache. |
750 | */ | 754 | */ |
751 | 755 | ||
756 | if (arp->ar_op == htons(ARPOP_REQUEST) && skb_metadata_dst(skb)) | ||
757 | reply_dst = (struct dst_entry *) | ||
758 | iptunnel_metadata_reply(skb_metadata_dst(skb), | ||
759 | GFP_ATOMIC); | ||
760 | |||
752 | /* Special case: IPv4 duplicate address detection packet (RFC2131) */ | 761 | /* Special case: IPv4 duplicate address detection packet (RFC2131) */ |
753 | if (sip == 0) { | 762 | if (sip == 0) { |
754 | if (arp->ar_op == htons(ARPOP_REQUEST) && | 763 | if (arp->ar_op == htons(ARPOP_REQUEST) && |
755 | inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL && | 764 | inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL && |
756 | !arp_ignore(in_dev, sip, tip)) | 765 | !arp_ignore(in_dev, sip, tip)) |
757 | arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, | 766 | arp_send_dst(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, |
758 | dev->dev_addr, sha); | 767 | sha, dev->dev_addr, sha, reply_dst); |
759 | goto out; | 768 | goto out; |
760 | } | 769 | } |
761 | 770 | ||
@@ -774,9 +783,10 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) | |||
774 | if (!dont_send) { | 783 | if (!dont_send) { |
775 | n = neigh_event_ns(&arp_tbl, sha, &sip, dev); | 784 | n = neigh_event_ns(&arp_tbl, sha, &sip, dev); |
776 | if (n) { | 785 | if (n) { |
777 | arp_send(ARPOP_REPLY, ETH_P_ARP, sip, | 786 | arp_send_dst(ARPOP_REPLY, ETH_P_ARP, |
778 | dev, tip, sha, dev->dev_addr, | 787 | sip, dev, tip, sha, |
779 | sha); | 788 | dev->dev_addr, sha, |
789 | reply_dst); | ||
780 | neigh_release(n); | 790 | neigh_release(n); |
781 | } | 791 | } |
782 | } | 792 | } |
@@ -794,9 +804,10 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) | |||
794 | if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED || | 804 | if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED || |
795 | skb->pkt_type == PACKET_HOST || | 805 | skb->pkt_type == PACKET_HOST || |
796 | NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) { | 806 | NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) { |
797 | arp_send(ARPOP_REPLY, ETH_P_ARP, sip, | 807 | arp_send_dst(ARPOP_REPLY, ETH_P_ARP, |
798 | dev, tip, sha, dev->dev_addr, | 808 | sip, dev, tip, sha, |
799 | sha); | 809 | dev->dev_addr, sha, |
810 | reply_dst); | ||
800 | } else { | 811 | } else { |
801 | pneigh_enqueue(&arp_tbl, | 812 | pneigh_enqueue(&arp_tbl, |
802 | in_dev->arp_parms, skb); | 813 | in_dev->arp_parms, skb); |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 6fcbd215cdbc..690bcbc59f26 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, | |||
340 | fl4.flowi4_tos = tos; | 340 | fl4.flowi4_tos = tos; |
341 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; | 341 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; |
342 | fl4.flowi4_tun_key.tun_id = 0; | 342 | fl4.flowi4_tun_key.tun_id = 0; |
343 | fl4.flowi4_flags = 0; | ||
343 | 344 | ||
344 | no_addr = idev->ifa_list == NULL; | 345 | no_addr = idev->ifa_list == NULL; |
345 | 346 | ||
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 26d6ffb6d23c..6c2af797f2f9 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1426,7 +1426,7 @@ found: | |||
1426 | nh->nh_flags & RTNH_F_LINKDOWN && | 1426 | nh->nh_flags & RTNH_F_LINKDOWN && |
1427 | !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) | 1427 | !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) |
1428 | continue; | 1428 | continue; |
1429 | if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) { | 1429 | if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) { |
1430 | if (flp->flowi4_oif && | 1430 | if (flp->flowi4_oif && |
1431 | flp->flowi4_oif != nh->nh_oif) | 1431 | flp->flowi4_oif != nh->nh_oif) |
1432 | continue; | 1432 | continue; |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 79fe05befcae..e5eb8ac4089d 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) | |||
427 | fl4.flowi4_mark = mark; | 427 | fl4.flowi4_mark = mark; |
428 | fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); | 428 | fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); |
429 | fl4.flowi4_proto = IPPROTO_ICMP; | 429 | fl4.flowi4_proto = IPPROTO_ICMP; |
430 | fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex; | 430 | fl4.flowi4_oif = vrf_master_ifindex(skb->dev); |
431 | security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); | 431 | security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); |
432 | rt = ip_route_output_key(net, &fl4); | 432 | rt = ip_route_output_key(net, &fl4); |
433 | if (IS_ERR(rt)) | 433 | if (IS_ERR(rt)) |
@@ -461,7 +461,7 @@ static struct rtable *icmp_route_lookup(struct net *net, | |||
461 | fl4->flowi4_proto = IPPROTO_ICMP; | 461 | fl4->flowi4_proto = IPPROTO_ICMP; |
462 | fl4->fl4_icmp_type = type; | 462 | fl4->fl4_icmp_type = type; |
463 | fl4->fl4_icmp_code = code; | 463 | fl4->fl4_icmp_code = code; |
464 | fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? : skb_in->dev->ifindex; | 464 | fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev); |
465 | 465 | ||
466 | security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); | 466 | security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); |
467 | rt = __ip_route_output_key(net, fl4); | 467 | rt = __ip_route_output_key(net, fl4); |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 134957159c27..7bb9c39e0a4d 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -685,20 +685,20 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue, | |||
685 | req->num_timeout = 0; | 685 | req->num_timeout = 0; |
686 | req->sk = NULL; | 686 | req->sk = NULL; |
687 | 687 | ||
688 | setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req); | ||
689 | mod_timer_pinned(&req->rsk_timer, jiffies + timeout); | ||
690 | req->rsk_hash = hash; | ||
691 | |||
688 | /* before letting lookups find us, make sure all req fields | 692 | /* before letting lookups find us, make sure all req fields |
689 | * are committed to memory and refcnt initialized. | 693 | * are committed to memory and refcnt initialized. |
690 | */ | 694 | */ |
691 | smp_wmb(); | 695 | smp_wmb(); |
692 | atomic_set(&req->rsk_refcnt, 2); | 696 | atomic_set(&req->rsk_refcnt, 2); |
693 | setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req); | ||
694 | req->rsk_hash = hash; | ||
695 | 697 | ||
696 | spin_lock(&queue->syn_wait_lock); | 698 | spin_lock(&queue->syn_wait_lock); |
697 | req->dl_next = lopt->syn_table[hash]; | 699 | req->dl_next = lopt->syn_table[hash]; |
698 | lopt->syn_table[hash] = req; | 700 | lopt->syn_table[hash] = req; |
699 | spin_unlock(&queue->syn_wait_lock); | 701 | spin_unlock(&queue->syn_wait_lock); |
700 | |||
701 | mod_timer_pinned(&req->rsk_timer, jiffies + timeout); | ||
702 | } | 702 | } |
703 | EXPORT_SYMBOL(reqsk_queue_hash_req); | 703 | EXPORT_SYMBOL(reqsk_queue_hash_req); |
704 | 704 | ||
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index ae22cc24fbe8..c67f9bd7699c 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -123,13 +123,15 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, | |||
123 | /* | 123 | /* |
124 | * Step 2: Hash TW into tcp ehash chain. | 124 | * Step 2: Hash TW into tcp ehash chain. |
125 | * Notes : | 125 | * Notes : |
126 | * - tw_refcnt is set to 3 because : | 126 | * - tw_refcnt is set to 4 because : |
127 | * - We have one reference from bhash chain. | 127 | * - We have one reference from bhash chain. |
128 | * - We have one reference from ehash chain. | 128 | * - We have one reference from ehash chain. |
129 | * - We have one reference from timer. | ||
130 | * - One reference for ourself (our caller will release it). | ||
129 | * We can use atomic_set() because prior spin_lock()/spin_unlock() | 131 | * We can use atomic_set() because prior spin_lock()/spin_unlock() |
130 | * committed into memory all tw fields. | 132 | * committed into memory all tw fields. |
131 | */ | 133 | */ |
132 | atomic_set(&tw->tw_refcnt, 1 + 1 + 1); | 134 | atomic_set(&tw->tw_refcnt, 4); |
133 | inet_twsk_add_node_rcu(tw, &ehead->chain); | 135 | inet_twsk_add_node_rcu(tw, &ehead->chain); |
134 | 136 | ||
135 | /* Step 3: Remove SK from hash chain */ | 137 | /* Step 3: Remove SK from hash chain */ |
@@ -217,7 +219,7 @@ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw) | |||
217 | } | 219 | } |
218 | EXPORT_SYMBOL(inet_twsk_deschedule_put); | 220 | EXPORT_SYMBOL(inet_twsk_deschedule_put); |
219 | 221 | ||
220 | void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo) | 222 | void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm) |
221 | { | 223 | { |
222 | /* timeout := RTO * 3.5 | 224 | /* timeout := RTO * 3.5 |
223 | * | 225 | * |
@@ -245,12 +247,14 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo) | |||
245 | */ | 247 | */ |
246 | 248 | ||
247 | tw->tw_kill = timeo <= 4*HZ; | 249 | tw->tw_kill = timeo <= 4*HZ; |
248 | if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) { | 250 | if (!rearm) { |
249 | atomic_inc(&tw->tw_refcnt); | 251 | BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo)); |
250 | atomic_inc(&tw->tw_dr->tw_count); | 252 | atomic_inc(&tw->tw_dr->tw_count); |
253 | } else { | ||
254 | mod_timer_pending(&tw->tw_timer, jiffies + timeo); | ||
251 | } | 255 | } |
252 | } | 256 | } |
253 | EXPORT_SYMBOL_GPL(inet_twsk_schedule); | 257 | EXPORT_SYMBOL_GPL(__inet_twsk_schedule); |
254 | 258 | ||
255 | void inet_twsk_purge(struct inet_hashinfo *hashinfo, | 259 | void inet_twsk_purge(struct inet_hashinfo *hashinfo, |
256 | struct inet_timewait_death_row *twdr, int family) | 260 | struct inet_timewait_death_row *twdr, int family) |
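
The inet_timewait_sock.c hunk above, like the dccp and tcp minisock hunks that now call __inet_twsk_hashdance() only after inet_twsk_schedule(), follows a single rule: set the reference count to cover every future holder and finish arming the timer before the object becomes findable by lookups. A userspace sketch of that publish-after-init ordering (C11 atomics, illustrative names, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;
	int payload;
};

static _Atomic(struct obj *) published;		/* one-slot "hash table" */

static void publish(struct obj *o)
{
	o->payload = 42;
	/* one ref for the table, one for the timer, one for the caller... */
	atomic_store_explicit(&o->refcnt, 3, memory_order_relaxed);
	/* ...and only then make the object visible to lookups */
	atomic_store_explicit(&published, o, memory_order_release);
}

int main(void)
{
	static struct obj o;
	struct obj *seen;

	publish(&o);
	seen = atomic_load_explicit(&published, memory_order_acquire);
	printf("refcnt=%d payload=%d\n", atomic_load(&seen->refcnt), seen->payload);
	return 0;
}
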
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 29ed6c5a5185..84dce6a92f93 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -46,12 +46,13 @@ | |||
46 | #include <net/net_namespace.h> | 46 | #include <net/net_namespace.h> |
47 | #include <net/netns/generic.h> | 47 | #include <net/netns/generic.h> |
48 | #include <net/rtnetlink.h> | 48 | #include <net/rtnetlink.h> |
49 | #include <net/dst_metadata.h> | ||
49 | 50 | ||
50 | int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, | 51 | int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, |
51 | __be32 src, __be32 dst, __u8 proto, | 52 | __be32 src, __be32 dst, __u8 proto, |
52 | __u8 tos, __u8 ttl, __be16 df, bool xnet) | 53 | __u8 tos, __u8 ttl, __be16 df, bool xnet) |
53 | { | 54 | { |
54 | int pkt_len = skb->len; | 55 | int pkt_len = skb->len - skb_inner_network_offset(skb); |
55 | struct iphdr *iph; | 56 | struct iphdr *iph; |
56 | int err; | 57 | int err; |
57 | 58 | ||
@@ -119,6 +120,33 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) | |||
119 | } | 120 | } |
120 | EXPORT_SYMBOL_GPL(iptunnel_pull_header); | 121 | EXPORT_SYMBOL_GPL(iptunnel_pull_header); |
121 | 122 | ||
123 | struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, | ||
124 | gfp_t flags) | ||
125 | { | ||
126 | struct metadata_dst *res; | ||
127 | struct ip_tunnel_info *dst, *src; | ||
128 | |||
129 | if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX) | ||
130 | return NULL; | ||
131 | |||
132 | res = metadata_dst_alloc(0, flags); | ||
133 | if (!res) | ||
134 | return NULL; | ||
135 | |||
136 | dst = &res->u.tun_info; | ||
137 | src = &md->u.tun_info; | ||
138 | dst->key.tun_id = src->key.tun_id; | ||
139 | if (src->mode & IP_TUNNEL_INFO_IPV6) | ||
140 | memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src, | ||
141 | sizeof(struct in6_addr)); | ||
142 | else | ||
143 | dst->key.u.ipv4.dst = src->key.u.ipv4.src; | ||
144 | dst->mode = src->mode | IP_TUNNEL_INFO_TX; | ||
145 | |||
146 | return res; | ||
147 | } | ||
148 | EXPORT_SYMBOL_GPL(iptunnel_metadata_reply); | ||
149 | |||
122 | struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, | 150 | struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, |
123 | bool csum_help, | 151 | bool csum_help, |
124 | int gso_type_mask) | 152 | int gso_type_mask) |
@@ -198,8 +226,6 @@ static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = { | |||
198 | [LWTUNNEL_IP_SRC] = { .type = NLA_U32 }, | 226 | [LWTUNNEL_IP_SRC] = { .type = NLA_U32 }, |
199 | [LWTUNNEL_IP_TTL] = { .type = NLA_U8 }, | 227 | [LWTUNNEL_IP_TTL] = { .type = NLA_U8 }, |
200 | [LWTUNNEL_IP_TOS] = { .type = NLA_U8 }, | 228 | [LWTUNNEL_IP_TOS] = { .type = NLA_U8 }, |
201 | [LWTUNNEL_IP_SPORT] = { .type = NLA_U16 }, | ||
202 | [LWTUNNEL_IP_DPORT] = { .type = NLA_U16 }, | ||
203 | [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 }, | 229 | [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 }, |
204 | }; | 230 | }; |
205 | 231 | ||
@@ -239,12 +265,6 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr, | |||
239 | if (tb[LWTUNNEL_IP_TOS]) | 265 | if (tb[LWTUNNEL_IP_TOS]) |
240 | tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]); | 266 | tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]); |
241 | 267 | ||
242 | if (tb[LWTUNNEL_IP_SPORT]) | ||
243 | tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]); | ||
244 | |||
245 | if (tb[LWTUNNEL_IP_DPORT]) | ||
246 | tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]); | ||
247 | |||
248 | if (tb[LWTUNNEL_IP_FLAGS]) | 268 | if (tb[LWTUNNEL_IP_FLAGS]) |
249 | tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]); | 269 | tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]); |
250 | 270 | ||
@@ -266,8 +286,6 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb, | |||
266 | nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) || | 286 | nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) || |
267 | nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) || | 287 | nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) || |
268 | nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) || | 288 | nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) || |
269 | nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) || | ||
270 | nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) || | ||
271 | nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags)) | 289 | nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags)) |
272 | return -ENOMEM; | 290 | return -ENOMEM; |
273 | 291 | ||
@@ -281,8 +299,6 @@ static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate) | |||
281 | + nla_total_size(4) /* LWTUNNEL_IP_SRC */ | 299 | + nla_total_size(4) /* LWTUNNEL_IP_SRC */ |
282 | + nla_total_size(1) /* LWTUNNEL_IP_TOS */ | 300 | + nla_total_size(1) /* LWTUNNEL_IP_TOS */ |
283 | + nla_total_size(1) /* LWTUNNEL_IP_TTL */ | 301 | + nla_total_size(1) /* LWTUNNEL_IP_TTL */ |
284 | + nla_total_size(2) /* LWTUNNEL_IP_SPORT */ | ||
285 | + nla_total_size(2) /* LWTUNNEL_IP_DPORT */ | ||
286 | + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */ | 302 | + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */ |
287 | } | 303 | } |
288 | 304 | ||
@@ -305,8 +321,6 @@ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = { | |||
305 | [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) }, | 321 | [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) }, |
306 | [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 }, | 322 | [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 }, |
307 | [LWTUNNEL_IP6_TC] = { .type = NLA_U8 }, | 323 | [LWTUNNEL_IP6_TC] = { .type = NLA_U8 }, |
308 | [LWTUNNEL_IP6_SPORT] = { .type = NLA_U16 }, | ||
309 | [LWTUNNEL_IP6_DPORT] = { .type = NLA_U16 }, | ||
310 | [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 }, | 324 | [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 }, |
311 | }; | 325 | }; |
312 | 326 | ||
@@ -346,12 +360,6 @@ static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr, | |||
346 | if (tb[LWTUNNEL_IP6_TC]) | 360 | if (tb[LWTUNNEL_IP6_TC]) |
347 | tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]); | 361 | tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]); |
348 | 362 | ||
349 | if (tb[LWTUNNEL_IP6_SPORT]) | ||
350 | tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]); | ||
351 | |||
352 | if (tb[LWTUNNEL_IP6_DPORT]) | ||
353 | tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]); | ||
354 | |||
355 | if (tb[LWTUNNEL_IP6_FLAGS]) | 363 | if (tb[LWTUNNEL_IP6_FLAGS]) |
356 | tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]); | 364 | tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]); |
357 | 365 | ||
@@ -373,8 +381,6 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb, | |||
373 | nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || | 381 | nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || |
374 | nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) || | 382 | nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) || |
375 | nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) || | 383 | nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) || |
376 | nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) || | ||
377 | nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) || | ||
378 | nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags)) | 384 | nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags)) |
379 | return -ENOMEM; | 385 | return -ENOMEM; |
380 | 386 | ||
@@ -388,8 +394,6 @@ static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate) | |||
388 | + nla_total_size(16) /* LWTUNNEL_IP6_SRC */ | 394 | + nla_total_size(16) /* LWTUNNEL_IP6_SRC */ |
389 | + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */ | 395 | + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */ |
390 | + nla_total_size(1) /* LWTUNNEL_IP6_TC */ | 396 | + nla_total_size(1) /* LWTUNNEL_IP6_TC */ |
391 | + nla_total_size(2) /* LWTUNNEL_IP6_SPORT */ | ||
392 | + nla_total_size(2) /* LWTUNNEL_IP6_DPORT */ | ||
393 | + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */ | 397 | + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */ |
394 | } | 398 | } |
395 | 399 | ||
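
iptunnel_metadata_reply() in the hunk above builds the reply's tunnel info by copying the tunnel id, turning the received key's source address into the reply's destination, and marking the result as TX. A minimal sketch of that swap (userspace stand-in types, not the kernel structures):

#include <stdio.h>
#include <stdint.h>

#define INFO_TX 0x1	/* stand-in for IP_TUNNEL_INFO_TX */

struct tun_key { uint32_t id, src, dst; };
struct tun_info { struct tun_key key; unsigned int mode; };

static struct tun_info make_reply(const struct tun_info *rx)
{
	struct tun_info tx = { .mode = rx->mode | INFO_TX };

	tx.key.id = rx->key.id;		/* same tunnel id */
	tx.key.dst = rx->key.src;	/* reply goes back to the sender */
	return tx;
}

int main(void)
{
	struct tun_info rx = { .key = { .id = 42, .src = 0x0a000001 } };
	struct tun_info tx = make_reply(&rx);

	printf("reply: id=%u dst=%#x mode=%#x\n", tx.key.id, tx.key.dst, tx.mode);
	return 0;
}
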
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 5f4a5565ad8b..c81deb85acb4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1737 | fl4.flowi4_mark = skb->mark; | 1737 | fl4.flowi4_mark = skb->mark; |
1738 | fl4.flowi4_tos = tos; | 1738 | fl4.flowi4_tos = tos; |
1739 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; | 1739 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; |
1740 | fl4.flowi4_flags = 0; | ||
1740 | fl4.daddr = daddr; | 1741 | fl4.daddr = daddr; |
1741 | fl4.saddr = saddr; | 1742 | fl4.saddr = saddr; |
1742 | err = fib_lookup(net, &fl4, &res, 0); | 1743 | err = fib_lookup(net, &fl4, &res, 0); |
@@ -2045,6 +2046,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) | |||
2045 | struct fib_result res; | 2046 | struct fib_result res; |
2046 | struct rtable *rth; | 2047 | struct rtable *rth; |
2047 | int orig_oif; | 2048 | int orig_oif; |
2049 | int err = -ENETUNREACH; | ||
2048 | 2050 | ||
2049 | res.tclassid = 0; | 2051 | res.tclassid = 0; |
2050 | res.fi = NULL; | 2052 | res.fi = NULL; |
@@ -2153,7 +2155,8 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) | |||
2153 | goto make_route; | 2155 | goto make_route; |
2154 | } | 2156 | } |
2155 | 2157 | ||
2156 | if (fib_lookup(net, fl4, &res, 0)) { | 2158 | err = fib_lookup(net, fl4, &res, 0); |
2159 | if (err) { | ||
2157 | res.fi = NULL; | 2160 | res.fi = NULL; |
2158 | res.table = NULL; | 2161 | res.table = NULL; |
2159 | if (fl4->flowi4_oif) { | 2162 | if (fl4->flowi4_oif) { |
@@ -2181,7 +2184,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) | |||
2181 | res.type = RTN_UNICAST; | 2184 | res.type = RTN_UNICAST; |
2182 | goto make_route; | 2185 | goto make_route; |
2183 | } | 2186 | } |
2184 | rth = ERR_PTR(-ENETUNREACH); | 2187 | rth = ERR_PTR(err); |
2185 | goto out; | 2188 | goto out; |
2186 | } | 2189 | } |
2187 | 2190 | ||
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index c6ded6b2a79f..448c2615fece 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -154,14 +154,20 @@ static void bictcp_init(struct sock *sk) | |||
154 | static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) | 154 | static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) |
155 | { | 155 | { |
156 | if (event == CA_EVENT_TX_START) { | 156 | if (event == CA_EVENT_TX_START) { |
157 | s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime; | ||
158 | struct bictcp *ca = inet_csk_ca(sk); | 157 | struct bictcp *ca = inet_csk_ca(sk); |
158 | u32 now = tcp_time_stamp; | ||
159 | s32 delta; | ||
160 | |||
161 | delta = now - tcp_sk(sk)->lsndtime; | ||
159 | 162 | ||
160 | /* We were application limited (idle) for a while. | 163 | /* We were application limited (idle) for a while. |
161 | * Shift epoch_start to keep cwnd growth to cubic curve. | 164 | * Shift epoch_start to keep cwnd growth to cubic curve. |
162 | */ | 165 | */ |
163 | if (ca->epoch_start && delta > 0) | 166 | if (ca->epoch_start && delta > 0) { |
164 | ca->epoch_start += delta; | 167 | ca->epoch_start += delta; |
168 | if (after(ca->epoch_start, now)) | ||
169 | ca->epoch_start = now; | ||
170 | } | ||
165 | return; | 171 | return; |
166 | } | 172 | } |
167 | } | 173 | } |
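
The tcp_cubic hunk above shifts ca->epoch_start forward by the idle time but clamps it so it can never land in the future, and the comparison has to be wrap-safe for 32-bit jiffies-style timestamps. A small sketch of that clamp (illustrative values; after32() mirrors the usual wrap-safe compare):

#include <stdio.h>
#include <stdint.h>

/* wrap-safe "a is later than b" for 32-bit tick counters */
static int after32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t now = 1000, lsndtime = 900, epoch_start = 950;
	int32_t delta = (int32_t)(now - lsndtime);	/* idle for 100 ticks */

	if (epoch_start && delta > 0) {
		epoch_start += delta;			/* 1050: overshoots "now" */
		if (after32(epoch_start, now))
			epoch_start = now;		/* clamp back to 1000 */
	}
	printf("epoch_start=%u\n", epoch_start);
	return 0;
}
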
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 6d8795b066ac..def765911ff8 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -162,9 +162,9 @@ kill_with_rst: | |||
162 | if (tcp_death_row.sysctl_tw_recycle && | 162 | if (tcp_death_row.sysctl_tw_recycle && |
163 | tcptw->tw_ts_recent_stamp && | 163 | tcptw->tw_ts_recent_stamp && |
164 | tcp_tw_remember_stamp(tw)) | 164 | tcp_tw_remember_stamp(tw)) |
165 | inet_twsk_schedule(tw, tw->tw_timeout); | 165 | inet_twsk_reschedule(tw, tw->tw_timeout); |
166 | else | 166 | else |
167 | inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); | 167 | inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); |
168 | return TCP_TW_ACK; | 168 | return TCP_TW_ACK; |
169 | } | 169 | } |
170 | 170 | ||
@@ -201,7 +201,7 @@ kill: | |||
201 | return TCP_TW_SUCCESS; | 201 | return TCP_TW_SUCCESS; |
202 | } | 202 | } |
203 | } | 203 | } |
204 | inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); | 204 | inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); |
205 | 205 | ||
206 | if (tmp_opt.saw_tstamp) { | 206 | if (tmp_opt.saw_tstamp) { |
207 | tcptw->tw_ts_recent = tmp_opt.rcv_tsval; | 207 | tcptw->tw_ts_recent = tmp_opt.rcv_tsval; |
@@ -251,7 +251,7 @@ kill: | |||
251 | * Do not reschedule in the last case. | 251 | * Do not reschedule in the last case. |
252 | */ | 252 | */ |
253 | if (paws_reject || th->ack) | 253 | if (paws_reject || th->ack) |
254 | inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); | 254 | inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); |
255 | 255 | ||
256 | return tcp_timewait_check_oow_rate_limit( | 256 | return tcp_timewait_check_oow_rate_limit( |
257 | tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT); | 257 | tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT); |
@@ -322,9 +322,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
322 | } while (0); | 322 | } while (0); |
323 | #endif | 323 | #endif |
324 | 324 | ||
325 | /* Linkage updates. */ | ||
326 | __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); | ||
327 | |||
328 | /* Get the TIME_WAIT timeout firing. */ | 325 | /* Get the TIME_WAIT timeout firing. */ |
329 | if (timeo < rto) | 326 | if (timeo < rto) |
330 | timeo = rto; | 327 | timeo = rto; |
@@ -338,6 +335,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
338 | } | 335 | } |
339 | 336 | ||
340 | inet_twsk_schedule(tw, timeo); | 337 | inet_twsk_schedule(tw, timeo); |
338 | /* Linkage updates. */ | ||
339 | __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); | ||
341 | inet_twsk_put(tw); | 340 | inet_twsk_put(tw); |
342 | } else { | 341 | } else { |
343 | /* Sorry, if we're out of memory, just CLOSE this | 342 | /* Sorry, if we're out of memory, just CLOSE this |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f9a8a12b62ee..1100ffe4a722 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2897,6 +2897,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) | |||
2897 | skb_reserve(skb, MAX_TCP_HEADER); | 2897 | skb_reserve(skb, MAX_TCP_HEADER); |
2898 | tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), | 2898 | tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), |
2899 | TCPHDR_ACK | TCPHDR_RST); | 2899 | TCPHDR_ACK | TCPHDR_RST); |
2900 | skb_mstamp_get(&skb->skb_mstamp); | ||
2900 | /* Send it off. */ | 2901 | /* Send it off. */ |
2901 | if (tcp_transmit_skb(sk, skb, 0, priority)) | 2902 | if (tcp_transmit_skb(sk, skb, 0, priority)) |
2902 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); | 2903 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index c0a15e7f359f..f7d1d5e19e95 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1024,7 +1024,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
1024 | if (netif_index_is_vrf(net, ipc.oif)) { | 1024 | if (netif_index_is_vrf(net, ipc.oif)) { |
1025 | flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, | 1025 | flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, |
1026 | RT_SCOPE_UNIVERSE, sk->sk_protocol, | 1026 | RT_SCOPE_UNIVERSE, sk->sk_protocol, |
1027 | (flow_flags | FLOWI_FLAG_VRFSRC), | 1027 | (flow_flags | FLOWI_FLAG_VRFSRC | |
1028 | FLOWI_FLAG_SKIP_NH_OIF), | ||
1028 | faddr, saddr, dport, | 1029 | faddr, saddr, dport, |
1029 | inet->inet_sport); | 1030 | inet->inet_sport); |
1030 | 1031 | ||
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index bb919b28619f..c10a9ee68433 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -33,6 +33,8 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4, | |||
33 | if (saddr) | 33 | if (saddr) |
34 | fl4->saddr = saddr->a4; | 34 | fl4->saddr = saddr->a4; |
35 | 35 | ||
36 | fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF; | ||
37 | |||
36 | rt = __ip_route_output_key(net, fl4); | 38 | rt = __ip_route_output_key(net, fl4); |
37 | if (!IS_ERR(rt)) | 39 | if (!IS_ERR(rt)) |
38 | return &rt->dst; | 40 | return &rt->dst; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 030fefdc9aed..900113376d4e 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -5127,13 +5127,12 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
5127 | 5127 | ||
5128 | rt = addrconf_get_prefix_route(&ifp->peer_addr, 128, | 5128 | rt = addrconf_get_prefix_route(&ifp->peer_addr, 128, |
5129 | ifp->idev->dev, 0, 0); | 5129 | ifp->idev->dev, 0, 0); |
5130 | if (rt && ip6_del_rt(rt)) | 5130 | if (rt) |
5131 | dst_free(&rt->dst); | 5131 | ip6_del_rt(rt); |
5132 | } | 5132 | } |
5133 | dst_hold(&ifp->rt->dst); | 5133 | dst_hold(&ifp->rt->dst); |
5134 | 5134 | ||
5135 | if (ip6_del_rt(ifp->rt)) | 5135 | ip6_del_rt(ifp->rt); |
5136 | dst_free(&ifp->rt->dst); | ||
5137 | 5136 | ||
5138 | rt_genid_bump_ipv6(net); | 5137 | rt_genid_bump_ipv6(net); |
5139 | break; | 5138 | break; |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 418d9823692b..7d2e0023c72d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn) | |||
155 | kmem_cache_free(fib6_node_kmem, fn); | 155 | kmem_cache_free(fib6_node_kmem, fn); |
156 | } | 156 | } |
157 | 157 | ||
158 | static void rt6_rcu_free(struct rt6_info *rt) | ||
159 | { | ||
160 | call_rcu(&rt->dst.rcu_head, dst_rcu_free); | ||
161 | } | ||
162 | |||
158 | static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) | 163 | static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) |
159 | { | 164 | { |
160 | int cpu; | 165 | int cpu; |
@@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) | |||
169 | ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu); | 174 | ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu); |
170 | pcpu_rt = *ppcpu_rt; | 175 | pcpu_rt = *ppcpu_rt; |
171 | if (pcpu_rt) { | 176 | if (pcpu_rt) { |
172 | dst_free(&pcpu_rt->dst); | 177 | rt6_rcu_free(pcpu_rt); |
173 | *ppcpu_rt = NULL; | 178 | *ppcpu_rt = NULL; |
174 | } | 179 | } |
175 | } | 180 | } |
@@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt) | |||
181 | { | 186 | { |
182 | if (atomic_dec_and_test(&rt->rt6i_ref)) { | 187 | if (atomic_dec_and_test(&rt->rt6i_ref)) { |
183 | rt6_free_pcpu(rt); | 188 | rt6_free_pcpu(rt); |
184 | dst_free(&rt->dst); | 189 | rt6_rcu_free(rt); |
185 | } | 190 | } |
186 | } | 191 | } |
187 | 192 | ||
@@ -846,7 +851,7 @@ add: | |||
846 | *ins = rt; | 851 | *ins = rt; |
847 | rt->rt6i_node = fn; | 852 | rt->rt6i_node = fn; |
848 | atomic_inc(&rt->rt6i_ref); | 853 | atomic_inc(&rt->rt6i_ref); |
849 | inet6_rt_notify(RTM_NEWROUTE, rt, info); | 854 | inet6_rt_notify(RTM_NEWROUTE, rt, info, 0); |
850 | info->nl_net->ipv6.rt6_stats->fib_rt_entries++; | 855 | info->nl_net->ipv6.rt6_stats->fib_rt_entries++; |
851 | 856 | ||
852 | if (!(fn->fn_flags & RTN_RTINFO)) { | 857 | if (!(fn->fn_flags & RTN_RTINFO)) { |
@@ -872,7 +877,7 @@ add: | |||
872 | rt->rt6i_node = fn; | 877 | rt->rt6i_node = fn; |
873 | rt->dst.rt6_next = iter->dst.rt6_next; | 878 | rt->dst.rt6_next = iter->dst.rt6_next; |
874 | atomic_inc(&rt->rt6i_ref); | 879 | atomic_inc(&rt->rt6i_ref); |
875 | inet6_rt_notify(RTM_NEWROUTE, rt, info); | 880 | inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); |
876 | if (!(fn->fn_flags & RTN_RTINFO)) { | 881 | if (!(fn->fn_flags & RTN_RTINFO)) { |
877 | info->nl_net->ipv6.rt6_stats->fib_route_nodes++; | 882 | info->nl_net->ipv6.rt6_stats->fib_route_nodes++; |
878 | fn->fn_flags |= RTN_RTINFO; | 883 | fn->fn_flags |= RTN_RTINFO; |
@@ -933,6 +938,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, | |||
933 | int replace_required = 0; | 938 | int replace_required = 0; |
934 | int sernum = fib6_new_sernum(info->nl_net); | 939 | int sernum = fib6_new_sernum(info->nl_net); |
935 | 940 | ||
941 | if (WARN_ON_ONCE((rt->dst.flags & DST_NOCACHE) && | ||
942 | !atomic_read(&rt->dst.__refcnt))) | ||
943 | return -EINVAL; | ||
944 | |||
936 | if (info->nlh) { | 945 | if (info->nlh) { |
937 | if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) | 946 | if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) |
938 | allow_create = 0; | 947 | allow_create = 0; |
@@ -1025,6 +1034,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, | |||
1025 | fib6_start_gc(info->nl_net, rt); | 1034 | fib6_start_gc(info->nl_net, rt); |
1026 | if (!(rt->rt6i_flags & RTF_CACHE)) | 1035 | if (!(rt->rt6i_flags & RTF_CACHE)) |
1027 | fib6_prune_clones(info->nl_net, pn); | 1036 | fib6_prune_clones(info->nl_net, pn); |
1037 | rt->dst.flags &= ~DST_NOCACHE; | ||
1028 | } | 1038 | } |
1029 | 1039 | ||
1030 | out: | 1040 | out: |
@@ -1049,7 +1059,8 @@ out: | |||
1049 | atomic_inc(&pn->leaf->rt6i_ref); | 1059 | atomic_inc(&pn->leaf->rt6i_ref); |
1050 | } | 1060 | } |
1051 | #endif | 1061 | #endif |
1052 | dst_free(&rt->dst); | 1062 | if (!(rt->dst.flags & DST_NOCACHE)) |
1063 | dst_free(&rt->dst); | ||
1053 | } | 1064 | } |
1054 | return err; | 1065 | return err; |
1055 | 1066 | ||
@@ -1060,7 +1071,8 @@ out: | |||
1060 | st_failure: | 1071 | st_failure: |
1061 | if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) | 1072 | if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) |
1062 | fib6_repair_tree(info->nl_net, fn); | 1073 | fib6_repair_tree(info->nl_net, fn); |
1063 | dst_free(&rt->dst); | 1074 | if (!(rt->dst.flags & DST_NOCACHE)) |
1075 | dst_free(&rt->dst); | ||
1064 | return err; | 1076 | return err; |
1065 | #endif | 1077 | #endif |
1066 | } | 1078 | } |
@@ -1410,7 +1422,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | |||
1410 | 1422 | ||
1411 | fib6_purge_rt(rt, fn, net); | 1423 | fib6_purge_rt(rt, fn, net); |
1412 | 1424 | ||
1413 | inet6_rt_notify(RTM_DELROUTE, rt, info); | 1425 | inet6_rt_notify(RTM_DELROUTE, rt, info, 0); |
1414 | rt6_release(rt); | 1426 | rt6_release(rt); |
1415 | } | 1427 | } |
1416 | 1428 | ||
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4038c694ec03..3c7b9310b33f 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -404,13 +404,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
404 | struct ipv6_tlv_tnl_enc_lim *tel; | 404 | struct ipv6_tlv_tnl_enc_lim *tel; |
405 | __u32 mtu; | 405 | __u32 mtu; |
406 | case ICMPV6_DEST_UNREACH: | 406 | case ICMPV6_DEST_UNREACH: |
407 | net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", | 407 | net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", |
408 | t->parms.name); | 408 | t->parms.name); |
409 | break; | 409 | break; |
410 | case ICMPV6_TIME_EXCEED: | 410 | case ICMPV6_TIME_EXCEED: |
411 | if (code == ICMPV6_EXC_HOPLIMIT) { | 411 | if (code == ICMPV6_EXC_HOPLIMIT) { |
412 | net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", | 412 | net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", |
413 | t->parms.name); | 413 | t->parms.name); |
414 | } | 414 | } |
415 | break; | 415 | break; |
416 | case ICMPV6_PARAMPROB: | 416 | case ICMPV6_PARAMPROB: |
@@ -421,12 +421,12 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
421 | if (teli && teli == be32_to_cpu(info) - 2) { | 421 | if (teli && teli == be32_to_cpu(info) - 2) { |
422 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; | 422 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; |
423 | if (tel->encap_limit == 0) { | 423 | if (tel->encap_limit == 0) { |
424 | net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", | 424 | net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", |
425 | t->parms.name); | 425 | t->parms.name); |
426 | } | 426 | } |
427 | } else { | 427 | } else { |
428 | net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", | 428 | net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", |
429 | t->parms.name); | 429 | t->parms.name); |
430 | } | 430 | } |
431 | break; | 431 | break; |
432 | case ICMPV6_PKT_TOOBIG: | 432 | case ICMPV6_PKT_TOOBIG: |
@@ -634,20 +634,20 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
634 | } | 634 | } |
635 | 635 | ||
636 | if (!fl6->flowi6_mark) | 636 | if (!fl6->flowi6_mark) |
637 | dst = ip6_tnl_dst_check(tunnel); | 637 | dst = ip6_tnl_dst_get(tunnel); |
638 | 638 | ||
639 | if (!dst) { | 639 | if (!dst) { |
640 | ndst = ip6_route_output(net, NULL, fl6); | 640 | dst = ip6_route_output(net, NULL, fl6); |
641 | 641 | ||
642 | if (ndst->error) | 642 | if (dst->error) |
643 | goto tx_err_link_failure; | 643 | goto tx_err_link_failure; |
644 | ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); | 644 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0); |
645 | if (IS_ERR(ndst)) { | 645 | if (IS_ERR(dst)) { |
646 | err = PTR_ERR(ndst); | 646 | err = PTR_ERR(dst); |
647 | ndst = NULL; | 647 | dst = NULL; |
648 | goto tx_err_link_failure; | 648 | goto tx_err_link_failure; |
649 | } | 649 | } |
650 | dst = ndst; | 650 | ndst = dst; |
651 | } | 651 | } |
652 | 652 | ||
653 | tdev = dst->dev; | 653 | tdev = dst->dev; |
@@ -702,12 +702,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
702 | skb = new_skb; | 702 | skb = new_skb; |
703 | } | 703 | } |
704 | 704 | ||
705 | if (fl6->flowi6_mark) { | 705 | if (!fl6->flowi6_mark && ndst) |
706 | skb_dst_set(skb, dst); | 706 | ip6_tnl_dst_set(tunnel, ndst); |
707 | ndst = NULL; | 707 | skb_dst_set(skb, dst); |
708 | } else { | ||
709 | skb_dst_set_noref(skb, dst); | ||
710 | } | ||
711 | 708 | ||
712 | proto = NEXTHDR_GRE; | 709 | proto = NEXTHDR_GRE; |
713 | if (encap_limit >= 0) { | 710 | if (encap_limit >= 0) { |
@@ -762,14 +759,12 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
762 | skb_set_inner_protocol(skb, protocol); | 759 | skb_set_inner_protocol(skb, protocol); |
763 | 760 | ||
764 | ip6tunnel_xmit(NULL, skb, dev); | 761 | ip6tunnel_xmit(NULL, skb, dev); |
765 | if (ndst) | ||
766 | ip6_tnl_dst_store(tunnel, ndst); | ||
767 | return 0; | 762 | return 0; |
768 | tx_err_link_failure: | 763 | tx_err_link_failure: |
769 | stats->tx_carrier_errors++; | 764 | stats->tx_carrier_errors++; |
770 | dst_link_failure(skb); | 765 | dst_link_failure(skb); |
771 | tx_err_dst_release: | 766 | tx_err_dst_release: |
772 | dst_release(ndst); | 767 | dst_release(dst); |
773 | return err; | 768 | return err; |
774 | } | 769 | } |
775 | 770 | ||
@@ -1223,6 +1218,9 @@ static const struct net_device_ops ip6gre_netdev_ops = { | |||
1223 | 1218 | ||
1224 | static void ip6gre_dev_free(struct net_device *dev) | 1219 | static void ip6gre_dev_free(struct net_device *dev) |
1225 | { | 1220 | { |
1221 | struct ip6_tnl *t = netdev_priv(dev); | ||
1222 | |||
1223 | ip6_tnl_dst_destroy(t); | ||
1226 | free_percpu(dev->tstats); | 1224 | free_percpu(dev->tstats); |
1227 | free_netdev(dev); | 1225 | free_netdev(dev); |
1228 | } | 1226 | } |
@@ -1245,9 +1243,10 @@ static void ip6gre_tunnel_setup(struct net_device *dev) | |||
1245 | netif_keep_dst(dev); | 1243 | netif_keep_dst(dev); |
1246 | } | 1244 | } |
1247 | 1245 | ||
1248 | static int ip6gre_tunnel_init(struct net_device *dev) | 1246 | static int ip6gre_tunnel_init_common(struct net_device *dev) |
1249 | { | 1247 | { |
1250 | struct ip6_tnl *tunnel; | 1248 | struct ip6_tnl *tunnel; |
1249 | int ret; | ||
1251 | 1250 | ||
1252 | tunnel = netdev_priv(dev); | 1251 | tunnel = netdev_priv(dev); |
1253 | 1252 | ||
@@ -1255,16 +1254,37 @@ static int ip6gre_tunnel_init(struct net_device *dev) | |||
1255 | tunnel->net = dev_net(dev); | 1254 | tunnel->net = dev_net(dev); |
1256 | strcpy(tunnel->parms.name, dev->name); | 1255 | strcpy(tunnel->parms.name, dev->name); |
1257 | 1256 | ||
1257 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | ||
1258 | if (!dev->tstats) | ||
1259 | return -ENOMEM; | ||
1260 | |||
1261 | ret = ip6_tnl_dst_init(tunnel); | ||
1262 | if (ret) { | ||
1263 | free_percpu(dev->tstats); | ||
1264 | dev->tstats = NULL; | ||
1265 | return ret; | ||
1266 | } | ||
1267 | |||
1268 | return 0; | ||
1269 | } | ||
1270 | |||
1271 | static int ip6gre_tunnel_init(struct net_device *dev) | ||
1272 | { | ||
1273 | struct ip6_tnl *tunnel; | ||
1274 | int ret; | ||
1275 | |||
1276 | ret = ip6gre_tunnel_init_common(dev); | ||
1277 | if (ret) | ||
1278 | return ret; | ||
1279 | |||
1280 | tunnel = netdev_priv(dev); | ||
1281 | |||
1258 | memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr)); | 1282 | memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr)); |
1259 | memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); | 1283 | memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); |
1260 | 1284 | ||
1261 | if (ipv6_addr_any(&tunnel->parms.raddr)) | 1285 | if (ipv6_addr_any(&tunnel->parms.raddr)) |
1262 | dev->header_ops = &ip6gre_header_ops; | 1286 | dev->header_ops = &ip6gre_header_ops; |
1263 | 1287 | ||
1264 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | ||
1265 | if (!dev->tstats) | ||
1266 | return -ENOMEM; | ||
1267 | |||
1268 | return 0; | 1288 | return 0; |
1269 | } | 1289 | } |
1270 | 1290 | ||
@@ -1460,19 +1480,16 @@ static void ip6gre_netlink_parms(struct nlattr *data[], | |||
1460 | static int ip6gre_tap_init(struct net_device *dev) | 1480 | static int ip6gre_tap_init(struct net_device *dev) |
1461 | { | 1481 | { |
1462 | struct ip6_tnl *tunnel; | 1482 | struct ip6_tnl *tunnel; |
1483 | int ret; | ||
1463 | 1484 | ||
1464 | tunnel = netdev_priv(dev); | 1485 | ret = ip6gre_tunnel_init_common(dev); |
1486 | if (ret) | ||
1487 | return ret; | ||
1465 | 1488 | ||
1466 | tunnel->dev = dev; | 1489 | tunnel = netdev_priv(dev); |
1467 | tunnel->net = dev_net(dev); | ||
1468 | strcpy(tunnel->parms.name, dev->name); | ||
1469 | 1490 | ||
1470 | ip6gre_tnl_link_config(tunnel, 1); | 1491 | ip6gre_tnl_link_config(tunnel, 1); |
1471 | 1492 | ||
1472 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | ||
1473 | if (!dev->tstats) | ||
1474 | return -ENOMEM; | ||
1475 | |||
1476 | return 0; | 1493 | return 0; |
1477 | } | 1494 | } |
1478 | 1495 | ||
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 26ea47930740..92b1aa38f121 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -586,20 +586,22 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, | |||
586 | frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr, | 586 | frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr, |
587 | &ipv6_hdr(skb)->saddr); | 587 | &ipv6_hdr(skb)->saddr); |
588 | 588 | ||
589 | hroom = LL_RESERVED_SPACE(rt->dst.dev); | ||
589 | if (skb_has_frag_list(skb)) { | 590 | if (skb_has_frag_list(skb)) { |
590 | int first_len = skb_pagelen(skb); | 591 | int first_len = skb_pagelen(skb); |
591 | struct sk_buff *frag2; | 592 | struct sk_buff *frag2; |
592 | 593 | ||
593 | if (first_len - hlen > mtu || | 594 | if (first_len - hlen > mtu || |
594 | ((first_len - hlen) & 7) || | 595 | ((first_len - hlen) & 7) || |
595 | skb_cloned(skb)) | 596 | skb_cloned(skb) || |
597 | skb_headroom(skb) < (hroom + sizeof(struct frag_hdr))) | ||
596 | goto slow_path; | 598 | goto slow_path; |
597 | 599 | ||
598 | skb_walk_frags(skb, frag) { | 600 | skb_walk_frags(skb, frag) { |
599 | /* Correct geometry. */ | 601 | /* Correct geometry. */ |
600 | if (frag->len > mtu || | 602 | if (frag->len > mtu || |
601 | ((frag->len & 7) && frag->next) || | 603 | ((frag->len & 7) && frag->next) || |
602 | skb_headroom(frag) < hlen) | 604 | skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr))) |
603 | goto slow_path_clean; | 605 | goto slow_path_clean; |
604 | 606 | ||
605 | /* Partially cloned skb? */ | 607 | /* Partially cloned skb? */ |
@@ -616,8 +618,6 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, | |||
616 | 618 | ||
617 | err = 0; | 619 | err = 0; |
618 | offset = 0; | 620 | offset = 0; |
619 | frag = skb_shinfo(skb)->frag_list; | ||
620 | skb_frag_list_init(skb); | ||
621 | /* BUILD HEADER */ | 621 | /* BUILD HEADER */ |
622 | 622 | ||
623 | *prevhdr = NEXTHDR_FRAGMENT; | 623 | *prevhdr = NEXTHDR_FRAGMENT; |
@@ -625,8 +625,11 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, | |||
625 | if (!tmp_hdr) { | 625 | if (!tmp_hdr) { |
626 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | 626 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
627 | IPSTATS_MIB_FRAGFAILS); | 627 | IPSTATS_MIB_FRAGFAILS); |
628 | return -ENOMEM; | 628 | err = -ENOMEM; |
629 | goto fail; | ||
629 | } | 630 | } |
631 | frag = skb_shinfo(skb)->frag_list; | ||
632 | skb_frag_list_init(skb); | ||
630 | 633 | ||
631 | __skb_pull(skb, hlen); | 634 | __skb_pull(skb, hlen); |
632 | fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr)); | 635 | fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr)); |
@@ -723,7 +726,6 @@ slow_path: | |||
723 | */ | 726 | */ |
724 | 727 | ||
725 | *prevhdr = NEXTHDR_FRAGMENT; | 728 | *prevhdr = NEXTHDR_FRAGMENT; |
726 | hroom = LL_RESERVED_SPACE(rt->dst.dev); | ||
727 | troom = rt->dst.dev->needed_tailroom; | 729 | troom = rt->dst.dev->needed_tailroom; |
728 | 730 | ||
729 | /* | 731 | /* |
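
The ip6_fragment() hunks above make the fast path depend on headroom as well: the head skb needs room for the link-layer header plus the 8-byte fragment header, and every buffer on the frag list additionally needs room for the hlen bytes of headers pushed back in front of it; anything short of that falls back to the slow, copying path. A rough sketch of the check (hypothetical numbers; LL_RESERVED stands in for LL_RESERVED_SPACE(dev)):

#include <stdio.h>

#define LL_RESERVED	16	/* stand-in for LL_RESERVED_SPACE(dev) */
#define FRAG_HDR_LEN	8	/* sizeof(struct frag_hdr) */

static int can_use_fast_path(int head_headroom, int frag_headroom, int hlen)
{
	if (head_headroom < LL_RESERVED + FRAG_HDR_LEN)
		return 0;
	if (frag_headroom < hlen + LL_RESERVED + FRAG_HDR_LEN)
		return 0;
	return 1;
}

int main(void)
{
	printf("fast path ok: %d\n", can_use_fast_path(32, 72, 40));
	printf("fast path ok: %d\n", can_use_fast_path(16, 72, 40));
	return 0;
}
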
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b0ab420612bc..eabffbb89795 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -126,36 +126,92 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev) | |||
126 | * Locking : hash tables are protected by RCU and RTNL | 126 | * Locking : hash tables are protected by RCU and RTNL |
127 | */ | 127 | */ |
128 | 128 | ||
129 | struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) | 129 | static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst, |
130 | struct dst_entry *dst) | ||
130 | { | 131 | { |
131 | struct dst_entry *dst = t->dst_cache; | 132 | write_seqlock_bh(&idst->lock); |
133 | dst_release(rcu_dereference_protected( | ||
134 | idst->dst, | ||
135 | lockdep_is_held(&idst->lock.lock))); | ||
136 | if (dst) { | ||
137 | dst_hold(dst); | ||
138 | idst->cookie = rt6_get_cookie((struct rt6_info *)dst); | ||
139 | } else { | ||
140 | idst->cookie = 0; | ||
141 | } | ||
142 | rcu_assign_pointer(idst->dst, dst); | ||
143 | write_sequnlock_bh(&idst->lock); | ||
144 | } | ||
145 | |||
146 | struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t) | ||
147 | { | ||
148 | struct ip6_tnl_dst *idst; | ||
149 | struct dst_entry *dst; | ||
150 | unsigned int seq; | ||
151 | u32 cookie; | ||
132 | 152 | ||
133 | if (dst && dst->obsolete && | 153 | idst = raw_cpu_ptr(t->dst_cache); |
134 | !dst->ops->check(dst, t->dst_cookie)) { | 154 | |
135 | t->dst_cache = NULL; | 155 | rcu_read_lock(); |
156 | do { | ||
157 | seq = read_seqbegin(&idst->lock); | ||
158 | dst = rcu_dereference(idst->dst); | ||
159 | cookie = idst->cookie; | ||
160 | } while (read_seqretry(&idst->lock, seq)); | ||
161 | |||
162 | if (dst && !atomic_inc_not_zero(&dst->__refcnt)) | ||
163 | dst = NULL; | ||
164 | rcu_read_unlock(); | ||
165 | |||
166 | if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) { | ||
167 | ip6_tnl_per_cpu_dst_set(idst, NULL); | ||
136 | dst_release(dst); | 168 | dst_release(dst); |
137 | return NULL; | 169 | dst = NULL; |
138 | } | 170 | } |
139 | |||
140 | return dst; | 171 | return dst; |
141 | } | 172 | } |
142 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_check); | 173 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_get); |
143 | 174 | ||
144 | void ip6_tnl_dst_reset(struct ip6_tnl *t) | 175 | void ip6_tnl_dst_reset(struct ip6_tnl *t) |
145 | { | 176 | { |
146 | dst_release(t->dst_cache); | 177 | int i; |
147 | t->dst_cache = NULL; | 178 | |
179 | for_each_possible_cpu(i) | ||
180 | ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL); | ||
148 | } | 181 | } |
149 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); | 182 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); |
150 | 183 | ||
151 | void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst) | 184 | void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst) |
185 | { | ||
186 | ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), dst); | ||
187 | |||
188 | } | ||
189 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_set); | ||
190 | |||
191 | void ip6_tnl_dst_destroy(struct ip6_tnl *t) | ||
152 | { | 192 | { |
153 | struct rt6_info *rt = (struct rt6_info *) dst; | 193 | if (!t->dst_cache) |
154 | t->dst_cookie = rt6_get_cookie(rt); | 194 | return; |
155 | dst_release(t->dst_cache); | 195 | |
156 | t->dst_cache = dst; | 196 | ip6_tnl_dst_reset(t); |
197 | free_percpu(t->dst_cache); | ||
157 | } | 198 | } |
158 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_store); | 199 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_destroy); |
200 | |||
201 | int ip6_tnl_dst_init(struct ip6_tnl *t) | ||
202 | { | ||
203 | int i; | ||
204 | |||
205 | t->dst_cache = alloc_percpu(struct ip6_tnl_dst); | ||
206 | if (!t->dst_cache) | ||
207 | return -ENOMEM; | ||
208 | |||
209 | for_each_possible_cpu(i) | ||
210 | seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock); | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_init); | ||
159 | 215 | ||
160 | /** | 216 | /** |
161 | * ip6_tnl_lookup - fetch tunnel matching the end-point addresses | 217 | * ip6_tnl_lookup - fetch tunnel matching the end-point addresses |
@@ -271,6 +327,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) | |||
271 | 327 | ||
272 | static void ip6_dev_free(struct net_device *dev) | 328 | static void ip6_dev_free(struct net_device *dev) |
273 | { | 329 | { |
330 | struct ip6_tnl *t = netdev_priv(dev); | ||
331 | |||
332 | ip6_tnl_dst_destroy(t); | ||
274 | free_percpu(dev->tstats); | 333 | free_percpu(dev->tstats); |
275 | free_netdev(dev); | 334 | free_netdev(dev); |
276 | } | 335 | } |
@@ -510,14 +569,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
510 | struct ipv6_tlv_tnl_enc_lim *tel; | 569 | struct ipv6_tlv_tnl_enc_lim *tel; |
511 | __u32 mtu; | 570 | __u32 mtu; |
512 | case ICMPV6_DEST_UNREACH: | 571 | case ICMPV6_DEST_UNREACH: |
513 | net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", | 572 | net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", |
514 | t->parms.name); | 573 | t->parms.name); |
515 | rel_msg = 1; | 574 | rel_msg = 1; |
516 | break; | 575 | break; |
517 | case ICMPV6_TIME_EXCEED: | 576 | case ICMPV6_TIME_EXCEED: |
518 | if ((*code) == ICMPV6_EXC_HOPLIMIT) { | 577 | if ((*code) == ICMPV6_EXC_HOPLIMIT) { |
519 | net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", | 578 | net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", |
520 | t->parms.name); | 579 | t->parms.name); |
521 | rel_msg = 1; | 580 | rel_msg = 1; |
522 | } | 581 | } |
523 | break; | 582 | break; |
@@ -529,13 +588,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
529 | if (teli && teli == *info - 2) { | 588 | if (teli && teli == *info - 2) { |
530 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; | 589 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; |
531 | if (tel->encap_limit == 0) { | 590 | if (tel->encap_limit == 0) { |
532 | net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", | 591 | net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", |
533 | t->parms.name); | 592 | t->parms.name); |
534 | rel_msg = 1; | 593 | rel_msg = 1; |
535 | } | 594 | } |
536 | } else { | 595 | } else { |
537 | net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", | 596 | net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", |
538 | t->parms.name); | 597 | t->parms.name); |
539 | } | 598 | } |
540 | break; | 599 | break; |
541 | case ICMPV6_PKT_TOOBIG: | 600 | case ICMPV6_PKT_TOOBIG: |
@@ -1010,23 +1069,23 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
1010 | memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); | 1069 | memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); |
1011 | neigh_release(neigh); | 1070 | neigh_release(neigh); |
1012 | } else if (!fl6->flowi6_mark) | 1071 | } else if (!fl6->flowi6_mark) |
1013 | dst = ip6_tnl_dst_check(t); | 1072 | dst = ip6_tnl_dst_get(t); |
1014 | 1073 | ||
1015 | if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) | 1074 | if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) |
1016 | goto tx_err_link_failure; | 1075 | goto tx_err_link_failure; |
1017 | 1076 | ||
1018 | if (!dst) { | 1077 | if (!dst) { |
1019 | ndst = ip6_route_output(net, NULL, fl6); | 1078 | dst = ip6_route_output(net, NULL, fl6); |
1020 | 1079 | ||
1021 | if (ndst->error) | 1080 | if (dst->error) |
1022 | goto tx_err_link_failure; | 1081 | goto tx_err_link_failure; |
1023 | ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); | 1082 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0); |
1024 | if (IS_ERR(ndst)) { | 1083 | if (IS_ERR(dst)) { |
1025 | err = PTR_ERR(ndst); | 1084 | err = PTR_ERR(dst); |
1026 | ndst = NULL; | 1085 | dst = NULL; |
1027 | goto tx_err_link_failure; | 1086 | goto tx_err_link_failure; |
1028 | } | 1087 | } |
1029 | dst = ndst; | 1088 | ndst = dst; |
1030 | } | 1089 | } |
1031 | 1090 | ||
1032 | tdev = dst->dev; | 1091 | tdev = dst->dev; |
@@ -1072,12 +1131,11 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
1072 | consume_skb(skb); | 1131 | consume_skb(skb); |
1073 | skb = new_skb; | 1132 | skb = new_skb; |
1074 | } | 1133 | } |
1075 | if (fl6->flowi6_mark) { | 1134 | |
1076 | skb_dst_set(skb, dst); | 1135 | if (!fl6->flowi6_mark && ndst) |
1077 | ndst = NULL; | 1136 | ip6_tnl_dst_set(t, ndst); |
1078 | } else { | 1137 | skb_dst_set(skb, dst); |
1079 | skb_dst_set_noref(skb, dst); | 1138 | |
1080 | } | ||
1081 | skb->transport_header = skb->network_header; | 1139 | skb->transport_header = skb->network_header; |
1082 | 1140 | ||
1083 | proto = fl6->flowi6_proto; | 1141 | proto = fl6->flowi6_proto; |
@@ -1101,14 +1159,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
1101 | ipv6h->saddr = fl6->saddr; | 1159 | ipv6h->saddr = fl6->saddr; |
1102 | ipv6h->daddr = fl6->daddr; | 1160 | ipv6h->daddr = fl6->daddr; |
1103 | ip6tunnel_xmit(NULL, skb, dev); | 1161 | ip6tunnel_xmit(NULL, skb, dev); |
1104 | if (ndst) | ||
1105 | ip6_tnl_dst_store(t, ndst); | ||
1106 | return 0; | 1162 | return 0; |
1107 | tx_err_link_failure: | 1163 | tx_err_link_failure: |
1108 | stats->tx_carrier_errors++; | 1164 | stats->tx_carrier_errors++; |
1109 | dst_link_failure(skb); | 1165 | dst_link_failure(skb); |
1110 | tx_err_dst_release: | 1166 | tx_err_dst_release: |
1111 | dst_release(ndst); | 1167 | dst_release(dst); |
1112 | return err; | 1168 | return err; |
1113 | } | 1169 | } |
1114 | 1170 | ||
@@ -1573,12 +1629,21 @@ static inline int | |||
1573 | ip6_tnl_dev_init_gen(struct net_device *dev) | 1629 | ip6_tnl_dev_init_gen(struct net_device *dev) |
1574 | { | 1630 | { |
1575 | struct ip6_tnl *t = netdev_priv(dev); | 1631 | struct ip6_tnl *t = netdev_priv(dev); |
1632 | int ret; | ||
1576 | 1633 | ||
1577 | t->dev = dev; | 1634 | t->dev = dev; |
1578 | t->net = dev_net(dev); | 1635 | t->net = dev_net(dev); |
1579 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | 1636 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
1580 | if (!dev->tstats) | 1637 | if (!dev->tstats) |
1581 | return -ENOMEM; | 1638 | return -ENOMEM; |
1639 | |||
1640 | ret = ip6_tnl_dst_init(t); | ||
1641 | if (ret) { | ||
1642 | free_percpu(dev->tstats); | ||
1643 | dev->tstats = NULL; | ||
1644 | return ret; | ||
1645 | } | ||
1646 | |||
1582 | return 0; | 1647 | return 0; |
1583 | } | 1648 | } |
1584 | 1649 | ||
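
The ip6_tunnel.c hunks above replace the tunnel's single cached dst with a per-cpu cache whose dst/cookie pair is published under a seqlock: the writer bumps the sequence around the update, and readers retry until they observe a stable, even sequence before taking a reference. Below is a minimal userspace sketch of that read/retry discipline using C11 atomics and a pthread mutex; struct cache, cache_set() and cache_get() are invented names for the example, not the kernel interface, and the plain reads inside the retry loop are a simplification of the kernel's seqlock helpers.

/* Sketch only: illustrates the seqlock-style read/retry pattern, not the
 * kernel ip6_tnl_dst API.  Build with: cc -std=c11 -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct cache {
	atomic_uint seq;        /* even = stable, odd = writer in progress */
	pthread_mutex_t lock;   /* serializes writers */
	void *ptr;              /* plays the role of idst->dst */
	uint32_t cookie;        /* plays the role of idst->cookie */
};

static void cache_set(struct cache *c, void *ptr, uint32_t cookie)
{
	pthread_mutex_lock(&c->lock);
	atomic_fetch_add_explicit(&c->seq, 1, memory_order_release); /* odd */
	c->ptr = ptr;
	c->cookie = cookie;
	atomic_fetch_add_explicit(&c->seq, 1, memory_order_release); /* even */
	pthread_mutex_unlock(&c->lock);
}

static void *cache_get(struct cache *c, uint32_t *cookie)
{
	unsigned int seq;
	void *ptr;

	do {
		seq = atomic_load_explicit(&c->seq, memory_order_acquire);
		ptr = c->ptr;
		*cookie = c->cookie;
	} while ((seq & 1) ||
		 atomic_load_explicit(&c->seq, memory_order_acquire) != seq);
	return ptr;
}

int main(void)
{
	struct cache c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	uint32_t cookie;
	int route = 42;

	cache_set(&c, &route, 0xabcd);
	printf("dst=%p cookie=%#x\n", cache_get(&c, &cookie), (unsigned)cookie);
	return 0;
}
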
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 53617d715188..cb32ce250db0 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1193,7 +1193,8 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, | |||
1193 | 1193 | ||
1194 | fl6->flowi6_iif = LOOPBACK_IFINDEX; | 1194 | fl6->flowi6_iif = LOOPBACK_IFINDEX; |
1195 | 1195 | ||
1196 | if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) | 1196 | if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || |
1197 | fl6->flowi6_oif) | ||
1197 | flags |= RT6_LOOKUP_F_IFACE; | 1198 | flags |= RT6_LOOKUP_F_IFACE; |
1198 | 1199 | ||
1199 | if (!ipv6_addr_any(&fl6->saddr)) | 1200 | if (!ipv6_addr_any(&fl6->saddr)) |
@@ -1322,8 +1323,7 @@ static void ip6_link_failure(struct sk_buff *skb) | |||
1322 | if (rt) { | 1323 | if (rt) { |
1323 | if (rt->rt6i_flags & RTF_CACHE) { | 1324 | if (rt->rt6i_flags & RTF_CACHE) { |
1324 | dst_hold(&rt->dst); | 1325 | dst_hold(&rt->dst); |
1325 | if (ip6_del_rt(rt)) | 1326 | ip6_del_rt(rt); |
1326 | dst_free(&rt->dst); | ||
1327 | } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) { | 1327 | } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) { |
1328 | rt->rt6i_node->fn_sernum = -1; | 1328 | rt->rt6i_node->fn_sernum = -1; |
1329 | } | 1329 | } |
@@ -1886,9 +1886,11 @@ int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret) | |||
1886 | rt->dst.input = ip6_pkt_prohibit; | 1886 | rt->dst.input = ip6_pkt_prohibit; |
1887 | break; | 1887 | break; |
1888 | case RTN_THROW: | 1888 | case RTN_THROW: |
1889 | case RTN_UNREACHABLE: | ||
1889 | default: | 1890 | default: |
1890 | rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN | 1891 | rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN |
1891 | : -ENETUNREACH; | 1892 | : (cfg->fc_type == RTN_UNREACHABLE) |
1893 | ? -EHOSTUNREACH : -ENETUNREACH; | ||
1892 | rt->dst.output = ip6_pkt_discard_out; | 1894 | rt->dst.output = ip6_pkt_discard_out; |
1893 | rt->dst.input = ip6_pkt_discard; | 1895 | rt->dst.input = ip6_pkt_discard; |
1894 | break; | 1896 | break; |
@@ -2028,7 +2030,8 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) | |||
2028 | struct fib6_table *table; | 2030 | struct fib6_table *table; |
2029 | struct net *net = dev_net(rt->dst.dev); | 2031 | struct net *net = dev_net(rt->dst.dev); |
2030 | 2032 | ||
2031 | if (rt == net->ipv6.ip6_null_entry) { | 2033 | if (rt == net->ipv6.ip6_null_entry || |
2034 | rt->dst.flags & DST_NOCACHE) { | ||
2032 | err = -ENOENT; | 2035 | err = -ENOENT; |
2033 | goto out; | 2036 | goto out; |
2034 | } | 2037 | } |
@@ -2515,6 +2518,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
2515 | rt->rt6i_dst.addr = *addr; | 2518 | rt->rt6i_dst.addr = *addr; |
2516 | rt->rt6i_dst.plen = 128; | 2519 | rt->rt6i_dst.plen = 128; |
2517 | rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); | 2520 | rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); |
2521 | rt->dst.flags |= DST_NOCACHE; | ||
2518 | 2522 | ||
2519 | atomic_set(&rt->dst.__refcnt, 1); | 2523 | atomic_set(&rt->dst.__refcnt, 1); |
2520 | 2524 | ||
@@ -3303,7 +3307,8 @@ errout: | |||
3303 | return err; | 3307 | return err; |
3304 | } | 3308 | } |
3305 | 3309 | ||
3306 | void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) | 3310 | void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, |
3311 | unsigned int nlm_flags) | ||
3307 | { | 3312 | { |
3308 | struct sk_buff *skb; | 3313 | struct sk_buff *skb; |
3309 | struct net *net = info->nl_net; | 3314 | struct net *net = info->nl_net; |
@@ -3318,7 +3323,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) | |||
3318 | goto errout; | 3323 | goto errout; |
3319 | 3324 | ||
3320 | err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, | 3325 | err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, |
3321 | event, info->portid, seq, 0, 0, 0); | 3326 | event, info->portid, seq, 0, 0, nlm_flags); |
3322 | if (err < 0) { | 3327 | if (err < 0) { |
3323 | /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ | 3328 | /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ |
3324 | WARN_ON(err == -EMSGSIZE); | 3329 | WARN_ON(err == -EMSGSIZE); |
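
The ip6_route_info_create() hunk above folds RTN_UNREACHABLE into the same branch as RTN_THROW and the default case, but gives each route type its own dst error. A small standalone restatement of that mapping; rtn_to_dst_error() is an invented helper and the RTN_* values are restated here only for the example.

#include <errno.h>
#include <stdio.h>

enum { RTN_UNREACHABLE = 7, RTN_THROW = 9 };   /* as in <linux/rtnetlink.h> */

static int rtn_to_dst_error(int fc_type)
{
	switch (fc_type) {
	case RTN_THROW:
		return -EAGAIN;
	case RTN_UNREACHABLE:
		return -EHOSTUNREACH;
	default:
		return -ENETUNREACH;
	}
}

int main(void)
{
	printf("throw=%d unreachable=%d other=%d\n",
	       rtn_to_dst_error(RTN_THROW),
	       rtn_to_dst_error(RTN_UNREACHABLE),
	       rtn_to_dst_error(0));
	return 0;
}
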
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f6b090df3930..afca2eb4dfa7 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work) | |||
1319 | tunnel = container_of(work, struct l2tp_tunnel, del_work); | 1319 | tunnel = container_of(work, struct l2tp_tunnel, del_work); |
1320 | sk = l2tp_tunnel_sock_lookup(tunnel); | 1320 | sk = l2tp_tunnel_sock_lookup(tunnel); |
1321 | if (!sk) | 1321 | if (!sk) |
1322 | return; | 1322 | goto out; |
1323 | 1323 | ||
1324 | sock = sk->sk_socket; | 1324 | sock = sk->sk_socket; |
1325 | 1325 | ||
@@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work) | |||
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | l2tp_tunnel_sock_put(sk); | 1343 | l2tp_tunnel_sock_put(sk); |
1344 | out: | ||
1345 | l2tp_tunnel_dec_refcount(tunnel); | ||
1344 | } | 1346 | } |
1345 | 1347 | ||
1346 | /* Create a socket for the tunnel, if one isn't set up by | 1348 | /* Create a socket for the tunnel, if one isn't set up by |
@@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | |||
1636 | */ | 1638 | */ |
1637 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | 1639 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) |
1638 | { | 1640 | { |
1641 | l2tp_tunnel_inc_refcount(tunnel); | ||
1639 | l2tp_tunnel_closeall(tunnel); | 1642 | l2tp_tunnel_closeall(tunnel); |
1640 | return (false == queue_work(l2tp_wq, &tunnel->del_work)); | 1643 | if (false == queue_work(l2tp_wq, &tunnel->del_work)) { |
1644 | l2tp_tunnel_dec_refcount(tunnel); | ||
1645 | return 1; | ||
1646 | } | ||
1647 | return 0; | ||
1641 | } | 1648 | } |
1642 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | 1649 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); |
1643 | 1650 | ||
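
The l2tp changes above pin the tunnel with an extra reference before queueing the deferred delete work; either the worker or a failed queue_work() drops that reference, so the tunnel cannot be freed while the work is pending. A compact userspace sketch of the same discipline, assuming invented tunnel_get()/tunnel_put() helpers and a fake_queue_work() that runs the work synchronously.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct tunnel {
	atomic_int refcount;
};

static void tunnel_get(struct tunnel *t)
{
	atomic_fetch_add(&t->refcount, 1);
}

static void tunnel_put(struct tunnel *t)
{
	if (atomic_fetch_sub(&t->refcount, 1) == 1) {
		printf("last reference dropped, freeing tunnel\n");
		free(t);
	}
}

static void del_work(struct tunnel *t)
{
	/* ... tear the tunnel down ... */
	tunnel_put(t);              /* balance the reference taken below */
}

static bool fake_queue_work(void (*fn)(struct tunnel *), struct tunnel *t)
{
	fn(t);                      /* run synchronously for the example */
	return true;
}

static int tunnel_delete(struct tunnel *t)
{
	tunnel_get(t);              /* keep the tunnel alive for the worker */
	if (!fake_queue_work(del_work, t)) {
		tunnel_put(t);      /* work not queued: drop it ourselves */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct tunnel *t = calloc(1, sizeof(*t));

	atomic_store(&t->refcount, 1);  /* reference held by the creator */
	tunnel_delete(t);
	tunnel_put(t);                  /* creator drops its reference */
	return 0;
}
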
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 17b1fe961c5d..7a77a1470f25 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -2474,6 +2474,7 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy, | |||
2474 | 2474 | ||
2475 | bss_conf->cqm_rssi_thold = rssi_thold; | 2475 | bss_conf->cqm_rssi_thold = rssi_thold; |
2476 | bss_conf->cqm_rssi_hyst = rssi_hyst; | 2476 | bss_conf->cqm_rssi_hyst = rssi_hyst; |
2477 | sdata->u.mgd.last_cqm_event_signal = 0; | ||
2477 | 2478 | ||
2478 | /* tell the driver upon association, unless already associated */ | 2479 | /* tell the driver upon association, unless already associated */ |
2479 | if (sdata->u.mgd.associated && | 2480 | if (sdata->u.mgd.associated && |
@@ -2518,15 +2519,17 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, | |||
2518 | continue; | 2519 | continue; |
2519 | 2520 | ||
2520 | for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { | 2521 | for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { |
2521 | if (~sdata->rc_rateidx_mcs_mask[i][j]) | 2522 | if (~sdata->rc_rateidx_mcs_mask[i][j]) { |
2522 | sdata->rc_has_mcs_mask[i] = true; | 2523 | sdata->rc_has_mcs_mask[i] = true; |
2524 | break; | ||
2525 | } | ||
2526 | } | ||
2523 | 2527 | ||
2524 | if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) | 2528 | for (j = 0; j < NL80211_VHT_NSS_MAX; j++) { |
2529 | if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) { | ||
2525 | sdata->rc_has_vht_mcs_mask[i] = true; | 2530 | sdata->rc_has_vht_mcs_mask[i] = true; |
2526 | |||
2527 | if (sdata->rc_has_mcs_mask[i] && | ||
2528 | sdata->rc_has_vht_mcs_mask[i]) | ||
2529 | break; | 2531 | break; |
2532 | } | ||
2530 | } | 2533 | } |
2531 | } | 2534 | } |
2532 | 2535 | ||
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 675d12c69e32..a5d41dfa9f05 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -107,12 +107,17 @@ EXPORT_SYMBOL(nf_log_register); | |||
107 | 107 | ||
108 | void nf_log_unregister(struct nf_logger *logger) | 108 | void nf_log_unregister(struct nf_logger *logger) |
109 | { | 109 | { |
110 | const struct nf_logger *log; | ||
110 | int i; | 111 | int i; |
111 | 112 | ||
112 | mutex_lock(&nf_log_mutex); | 113 | mutex_lock(&nf_log_mutex); |
113 | for (i = 0; i < NFPROTO_NUMPROTO; i++) | 114 | for (i = 0; i < NFPROTO_NUMPROTO; i++) { |
114 | RCU_INIT_POINTER(loggers[i][logger->type], NULL); | 115 | log = nft_log_dereference(loggers[i][logger->type]); |
116 | if (log == logger) | ||
117 | RCU_INIT_POINTER(loggers[i][logger->type], NULL); | ||
118 | } | ||
115 | mutex_unlock(&nf_log_mutex); | 119 | mutex_unlock(&nf_log_mutex); |
120 | synchronize_rcu(); | ||
116 | } | 121 | } |
117 | EXPORT_SYMBOL(nf_log_unregister); | 122 | EXPORT_SYMBOL(nf_log_unregister); |
118 | 123 | ||
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 66def315eb56..9c8fab00164b 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -619,6 +619,13 @@ struct nft_xt { | |||
619 | 619 | ||
620 | static struct nft_expr_type nft_match_type; | 620 | static struct nft_expr_type nft_match_type; |
621 | 621 | ||
622 | static bool nft_match_cmp(const struct xt_match *match, | ||
623 | const char *name, u32 rev, u32 family) | ||
624 | { | ||
625 | return strcmp(match->name, name) == 0 && match->revision == rev && | ||
626 | (match->family == NFPROTO_UNSPEC || match->family == family); | ||
627 | } | ||
628 | |||
622 | static const struct nft_expr_ops * | 629 | static const struct nft_expr_ops * |
623 | nft_match_select_ops(const struct nft_ctx *ctx, | 630 | nft_match_select_ops(const struct nft_ctx *ctx, |
624 | const struct nlattr * const tb[]) | 631 | const struct nlattr * const tb[]) |
@@ -626,7 +633,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
626 | struct nft_xt *nft_match; | 633 | struct nft_xt *nft_match; |
627 | struct xt_match *match; | 634 | struct xt_match *match; |
628 | char *mt_name; | 635 | char *mt_name; |
629 | __u32 rev, family; | 636 | u32 rev, family; |
630 | 637 | ||
631 | if (tb[NFTA_MATCH_NAME] == NULL || | 638 | if (tb[NFTA_MATCH_NAME] == NULL || |
632 | tb[NFTA_MATCH_REV] == NULL || | 639 | tb[NFTA_MATCH_REV] == NULL || |
@@ -641,8 +648,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
641 | list_for_each_entry(nft_match, &nft_match_list, head) { | 648 | list_for_each_entry(nft_match, &nft_match_list, head) { |
642 | struct xt_match *match = nft_match->ops.data; | 649 | struct xt_match *match = nft_match->ops.data; |
643 | 650 | ||
644 | if (strcmp(match->name, mt_name) == 0 && | 651 | if (nft_match_cmp(match, mt_name, rev, family)) { |
645 | match->revision == rev && match->family == family) { | ||
646 | if (!try_module_get(match->me)) | 652 | if (!try_module_get(match->me)) |
647 | return ERR_PTR(-ENOENT); | 653 | return ERR_PTR(-ENOENT); |
648 | 654 | ||
@@ -693,6 +699,13 @@ static LIST_HEAD(nft_target_list); | |||
693 | 699 | ||
694 | static struct nft_expr_type nft_target_type; | 700 | static struct nft_expr_type nft_target_type; |
695 | 701 | ||
702 | static bool nft_target_cmp(const struct xt_target *tg, | ||
703 | const char *name, u32 rev, u32 family) | ||
704 | { | ||
705 | return strcmp(tg->name, name) == 0 && tg->revision == rev && | ||
706 | (tg->family == NFPROTO_UNSPEC || tg->family == family); | ||
707 | } | ||
708 | |||
696 | static const struct nft_expr_ops * | 709 | static const struct nft_expr_ops * |
697 | nft_target_select_ops(const struct nft_ctx *ctx, | 710 | nft_target_select_ops(const struct nft_ctx *ctx, |
698 | const struct nlattr * const tb[]) | 711 | const struct nlattr * const tb[]) |
@@ -700,7 +713,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
700 | struct nft_xt *nft_target; | 713 | struct nft_xt *nft_target; |
701 | struct xt_target *target; | 714 | struct xt_target *target; |
702 | char *tg_name; | 715 | char *tg_name; |
703 | __u32 rev, family; | 716 | u32 rev, family; |
704 | 717 | ||
705 | if (tb[NFTA_TARGET_NAME] == NULL || | 718 | if (tb[NFTA_TARGET_NAME] == NULL || |
706 | tb[NFTA_TARGET_REV] == NULL || | 719 | tb[NFTA_TARGET_REV] == NULL || |
@@ -715,8 +728,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
715 | list_for_each_entry(nft_target, &nft_target_list, head) { | 728 | list_for_each_entry(nft_target, &nft_target_list, head) { |
716 | struct xt_target *target = nft_target->ops.data; | 729 | struct xt_target *target = nft_target->ops.data; |
717 | 730 | ||
718 | if (strcmp(target->name, tg_name) == 0 && | 731 | if (nft_target_cmp(target, tg_name, rev, family)) { |
719 | target->revision == rev && target->family == family) { | ||
720 | if (!try_module_get(target->me)) | 732 | if (!try_module_get(target->me)) |
721 | return ERR_PTR(-ENOENT); | 733 | return ERR_PTR(-ENOENT); |
722 | 734 | ||
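
The nft_compat.c hunks factor the match/target lookup predicate into nft_match_cmp()/nft_target_cmp(), which additionally accept extensions registered with NFPROTO_UNSPEC as family-independent. A trimmed-down restatement, using an invented struct xt_match_lite in place of the real struct xt_match.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NFPROTO_UNSPEC 0
#define NFPROTO_IPV4   2

struct xt_match_lite {
	const char *name;
	uint32_t revision;
	uint32_t family;
};

static bool nft_match_cmp(const struct xt_match_lite *match,
			  const char *name, uint32_t rev, uint32_t family)
{
	return strcmp(match->name, name) == 0 && match->revision == rev &&
	       (match->family == NFPROTO_UNSPEC || match->family == family);
}

int main(void)
{
	struct xt_match_lite m = { .name = "limit", .revision = 0,
				   .family = NFPROTO_UNSPEC };

	/* A family-unspecific match now satisfies an IPv4 lookup as well. */
	printf("ipv4 lookup matches: %d\n",
	       nft_match_cmp(&m, "limit", 0, NFPROTO_IPV4));
	return 0;
}
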
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 7f86d3b55060..8f060d7f9a0e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -125,6 +125,24 @@ static inline u32 netlink_group_mask(u32 group) | |||
125 | return group ? 1 << (group - 1) : 0; | 125 | return group ? 1 << (group - 1) : 0; |
126 | } | 126 | } |
127 | 127 | ||
128 | static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb, | ||
129 | gfp_t gfp_mask) | ||
130 | { | ||
131 | unsigned int len = skb_end_offset(skb); | ||
132 | struct sk_buff *new; | ||
133 | |||
134 | new = alloc_skb(len, gfp_mask); | ||
135 | if (new == NULL) | ||
136 | return NULL; | ||
137 | |||
138 | NETLINK_CB(new).portid = NETLINK_CB(skb).portid; | ||
139 | NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group; | ||
140 | NETLINK_CB(new).creds = NETLINK_CB(skb).creds; | ||
141 | |||
142 | memcpy(skb_put(new, len), skb->data, len); | ||
143 | return new; | ||
144 | } | ||
145 | |||
128 | int netlink_add_tap(struct netlink_tap *nt) | 146 | int netlink_add_tap(struct netlink_tap *nt) |
129 | { | 147 | { |
130 | if (unlikely(nt->dev->type != ARPHRD_NETLINK)) | 148 | if (unlikely(nt->dev->type != ARPHRD_NETLINK)) |
@@ -206,7 +224,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, | |||
206 | int ret = -ENOMEM; | 224 | int ret = -ENOMEM; |
207 | 225 | ||
208 | dev_hold(dev); | 226 | dev_hold(dev); |
209 | nskb = skb_clone(skb, GFP_ATOMIC); | 227 | |
228 | if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head)) | ||
229 | nskb = netlink_to_full_skb(skb, GFP_ATOMIC); | ||
230 | else | ||
231 | nskb = skb_clone(skb, GFP_ATOMIC); | ||
210 | if (nskb) { | 232 | if (nskb) { |
211 | nskb->dev = dev; | 233 | nskb->dev = dev; |
212 | nskb->protocol = htons((u16) sk->sk_protocol); | 234 | nskb->protocol = htons((u16) sk->sk_protocol); |
@@ -279,11 +301,6 @@ static void netlink_rcv_wake(struct sock *sk) | |||
279 | } | 301 | } |
280 | 302 | ||
281 | #ifdef CONFIG_NETLINK_MMAP | 303 | #ifdef CONFIG_NETLINK_MMAP |
282 | static bool netlink_skb_is_mmaped(const struct sk_buff *skb) | ||
283 | { | ||
284 | return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; | ||
285 | } | ||
286 | |||
287 | static bool netlink_rx_is_mmaped(struct sock *sk) | 304 | static bool netlink_rx_is_mmaped(struct sock *sk) |
288 | { | 305 | { |
289 | return nlk_sk(sk)->rx_ring.pg_vec != NULL; | 306 | return nlk_sk(sk)->rx_ring.pg_vec != NULL; |
@@ -846,7 +863,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) | |||
846 | } | 863 | } |
847 | 864 | ||
848 | #else /* CONFIG_NETLINK_MMAP */ | 865 | #else /* CONFIG_NETLINK_MMAP */ |
849 | #define netlink_skb_is_mmaped(skb) false | ||
850 | #define netlink_rx_is_mmaped(sk) false | 866 | #define netlink_rx_is_mmaped(sk) false |
851 | #define netlink_tx_is_mmaped(sk) false | 867 | #define netlink_tx_is_mmaped(sk) false |
852 | #define netlink_mmap sock_no_mmap | 868 | #define netlink_mmap sock_no_mmap |
@@ -1094,8 +1110,8 @@ static int netlink_insert(struct sock *sk, u32 portid) | |||
1094 | 1110 | ||
1095 | lock_sock(sk); | 1111 | lock_sock(sk); |
1096 | 1112 | ||
1097 | err = -EBUSY; | 1113 | err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; |
1098 | if (nlk_sk(sk)->portid) | 1114 | if (nlk_sk(sk)->bound) |
1099 | goto err; | 1115 | goto err; |
1100 | 1116 | ||
1101 | err = -ENOMEM; | 1117 | err = -ENOMEM; |
@@ -1115,10 +1131,14 @@ static int netlink_insert(struct sock *sk, u32 portid) | |||
1115 | err = -EOVERFLOW; | 1131 | err = -EOVERFLOW; |
1116 | if (err == -EEXIST) | 1132 | if (err == -EEXIST) |
1117 | err = -EADDRINUSE; | 1133 | err = -EADDRINUSE; |
1118 | nlk_sk(sk)->portid = 0; | ||
1119 | sock_put(sk); | 1134 | sock_put(sk); |
1135 | goto err; | ||
1120 | } | 1136 | } |
1121 | 1137 | ||
1138 | /* We need to ensure that the socket is hashed and visible. */ | ||
1139 | smp_wmb(); | ||
1140 | nlk_sk(sk)->bound = portid; | ||
1141 | |||
1122 | err: | 1142 | err: |
1123 | release_sock(sk); | 1143 | release_sock(sk); |
1124 | return err; | 1144 | return err; |
@@ -1503,6 +1523,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
1503 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; | 1523 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; |
1504 | int err; | 1524 | int err; |
1505 | long unsigned int groups = nladdr->nl_groups; | 1525 | long unsigned int groups = nladdr->nl_groups; |
1526 | bool bound; | ||
1506 | 1527 | ||
1507 | if (addr_len < sizeof(struct sockaddr_nl)) | 1528 | if (addr_len < sizeof(struct sockaddr_nl)) |
1508 | return -EINVAL; | 1529 | return -EINVAL; |
@@ -1519,9 +1540,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
1519 | return err; | 1540 | return err; |
1520 | } | 1541 | } |
1521 | 1542 | ||
1522 | if (nlk->portid) | 1543 | bound = nlk->bound; |
1544 | if (bound) { | ||
1545 | /* Ensure nlk->portid is up-to-date. */ | ||
1546 | smp_rmb(); | ||
1547 | |||
1523 | if (nladdr->nl_pid != nlk->portid) | 1548 | if (nladdr->nl_pid != nlk->portid) |
1524 | return -EINVAL; | 1549 | return -EINVAL; |
1550 | } | ||
1525 | 1551 | ||
1526 | if (nlk->netlink_bind && groups) { | 1552 | if (nlk->netlink_bind && groups) { |
1527 | int group; | 1553 | int group; |
@@ -1537,7 +1563,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
1537 | } | 1563 | } |
1538 | } | 1564 | } |
1539 | 1565 | ||
1540 | if (!nlk->portid) { | 1566 | /* No need for barriers here as we return to user-space without |
1567 | * using any of the bound attributes. | ||
1568 | */ | ||
1569 | if (!bound) { | ||
1541 | err = nladdr->nl_pid ? | 1570 | err = nladdr->nl_pid ? |
1542 | netlink_insert(sk, nladdr->nl_pid) : | 1571 | netlink_insert(sk, nladdr->nl_pid) : |
1543 | netlink_autobind(sock); | 1572 | netlink_autobind(sock); |
@@ -1585,7 +1614,10 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, | |||
1585 | !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) | 1614 | !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) |
1586 | return -EPERM; | 1615 | return -EPERM; |
1587 | 1616 | ||
1588 | if (!nlk->portid) | 1617 | /* No need for barriers here as we return to user-space without |
1618 | * using any of the bound attributes. | ||
1619 | */ | ||
1620 | if (!nlk->bound) | ||
1589 | err = netlink_autobind(sock); | 1621 | err = netlink_autobind(sock); |
1590 | 1622 | ||
1591 | if (err == 0) { | 1623 | if (err == 0) { |
@@ -2426,10 +2458,13 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | |||
2426 | dst_group = nlk->dst_group; | 2458 | dst_group = nlk->dst_group; |
2427 | } | 2459 | } |
2428 | 2460 | ||
2429 | if (!nlk->portid) { | 2461 | if (!nlk->bound) { |
2430 | err = netlink_autobind(sock); | 2462 | err = netlink_autobind(sock); |
2431 | if (err) | 2463 | if (err) |
2432 | goto out; | 2464 | goto out; |
2465 | } else { | ||
2466 | /* Ensure nlk is hashed and visible. */ | ||
2467 | smp_rmb(); | ||
2433 | } | 2468 | } |
2434 | 2469 | ||
2435 | /* It's a really convoluted way for userland to ask for mmaped | 2470 | /* It's a really convoluted way for userland to ask for mmaped |
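
The af_netlink.c hunks above stop using a non-zero portid as the "socket is bound" test and instead set a dedicated bound flag only after the socket has been hashed, with smp_wmb()/smp_rmb() ordering the flag against the published state. A minimal userspace sketch of that publish/observe ordering, with C11 release/acquire standing in for the kernel barriers; the struct and function names are invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct nl_sock_lite {
	uint32_t portid;
	atomic_bool bound;
};

static void insert_and_publish(struct nl_sock_lite *sk, uint32_t portid)
{
	sk->portid = portid;                    /* hash insert, portid, ... */
	/* make the above visible before 'bound' can be observed as true */
	atomic_store_explicit(&sk->bound, true, memory_order_release);
}

static bool lookup_portid(struct nl_sock_lite *sk, uint32_t *portid)
{
	if (!atomic_load_explicit(&sk->bound, memory_order_acquire))
		return false;                   /* not bound yet */
	*portid = sk->portid;                   /* safe: ordered after 'bound' */
	return true;
}

int main(void)
{
	struct nl_sock_lite sk = { 0 };
	uint32_t portid;

	insert_and_publish(&sk, 4242);
	if (lookup_portid(&sk, &portid))
		printf("bound to portid %u\n", (unsigned)portid);
	return 0;
}
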
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index 89008405d6b4..14437d9b1965 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h | |||
@@ -35,6 +35,7 @@ struct netlink_sock { | |||
35 | unsigned long state; | 35 | unsigned long state; |
36 | size_t max_recvmsg_len; | 36 | size_t max_recvmsg_len; |
37 | wait_queue_head_t wait; | 37 | wait_queue_head_t wait; |
38 | bool bound; | ||
38 | bool cb_running; | 39 | bool cb_running; |
39 | struct netlink_callback cb; | 40 | struct netlink_callback cb; |
40 | struct mutex *cb_mutex; | 41 | struct mutex *cb_mutex; |
@@ -59,6 +60,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk) | |||
59 | return container_of(sk, struct netlink_sock, sk); | 60 | return container_of(sk, struct netlink_sock, sk); |
60 | } | 61 | } |
61 | 62 | ||
63 | static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb) | ||
64 | { | ||
65 | #ifdef CONFIG_NETLINK_MMAP | ||
66 | return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; | ||
67 | #else | ||
68 | return false; | ||
69 | #endif /* CONFIG_NETLINK_MMAP */ | ||
70 | } | ||
71 | |||
62 | struct netlink_table { | 72 | struct netlink_table { |
63 | struct rhashtable hash; | 73 | struct rhashtable hash; |
64 | struct hlist_head mc_list; | 74 | struct hlist_head mc_list; |
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index 2a071f470d57..d143aa9f6654 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig | |||
@@ -5,7 +5,8 @@ | |||
5 | config OPENVSWITCH | 5 | config OPENVSWITCH |
6 | tristate "Open vSwitch" | 6 | tristate "Open vSwitch" |
7 | depends on INET | 7 | depends on INET |
8 | depends on (!NF_CONNTRACK || NF_CONNTRACK) | 8 | depends on !NF_CONNTRACK || \ |
9 | (NF_CONNTRACK && (!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6)) | ||
9 | select LIBCRC32C | 10 | select LIBCRC32C |
10 | select MPLS | 11 | select MPLS |
11 | select NET_MPLS_GSO | 12 | select NET_MPLS_GSO |
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index e8e524ad8a01..002a755fa07e 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
@@ -275,13 +275,15 @@ static int ovs_ct_helper(struct sk_buff *skb, u16 proto) | |||
275 | case NFPROTO_IPV6: { | 275 | case NFPROTO_IPV6: { |
276 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; | 276 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; |
277 | __be16 frag_off; | 277 | __be16 frag_off; |
278 | int ofs; | ||
278 | 279 | ||
279 | protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), | 280 | ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, |
280 | &nexthdr, &frag_off); | 281 | &frag_off); |
281 | if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { | 282 | if (ofs < 0 || (frag_off & htons(~0x7)) != 0) { |
282 | pr_debug("proto header not found\n"); | 283 | pr_debug("proto header not found\n"); |
283 | return NF_ACCEPT; | 284 | return NF_ACCEPT; |
284 | } | 285 | } |
286 | protoff = ofs; | ||
285 | break; | 287 | break; |
286 | } | 288 | } |
287 | default: | 289 | default: |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 6fbd2decb19e..b816ff871528 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -952,7 +952,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
952 | if (error) | 952 | if (error) |
953 | goto err_kfree_flow; | 953 | goto err_kfree_flow; |
954 | 954 | ||
955 | ovs_flow_mask_key(&new_flow->key, &key, &mask); | 955 | ovs_flow_mask_key(&new_flow->key, &key, true, &mask); |
956 | 956 | ||
957 | /* Extract flow identifier. */ | 957 | /* Extract flow identifier. */ |
958 | error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], | 958 | error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], |
@@ -1080,7 +1080,7 @@ static struct sw_flow_actions *get_flow_actions(struct net *net, | |||
1080 | struct sw_flow_key masked_key; | 1080 | struct sw_flow_key masked_key; |
1081 | int error; | 1081 | int error; |
1082 | 1082 | ||
1083 | ovs_flow_mask_key(&masked_key, key, mask); | 1083 | ovs_flow_mask_key(&masked_key, key, true, mask); |
1084 | error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log); | 1084 | error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log); |
1085 | if (error) { | 1085 | if (error) { |
1086 | OVS_NLERR(log, | 1086 | OVS_NLERR(log, |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index c92d6a262bc5..5c030a4d7338 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
@@ -57,6 +57,7 @@ struct ovs_len_tbl { | |||
57 | }; | 57 | }; |
58 | 58 | ||
59 | #define OVS_ATTR_NESTED -1 | 59 | #define OVS_ATTR_NESTED -1 |
60 | #define OVS_ATTR_VARIABLE -2 | ||
60 | 61 | ||
61 | static void update_range(struct sw_flow_match *match, | 62 | static void update_range(struct sw_flow_match *match, |
62 | size_t offset, size_t size, bool is_mask) | 63 | size_t offset, size_t size, bool is_mask) |
@@ -304,6 +305,10 @@ size_t ovs_key_attr_size(void) | |||
304 | + nla_total_size(28); /* OVS_KEY_ATTR_ND */ | 305 | + nla_total_size(28); /* OVS_KEY_ATTR_ND */ |
305 | } | 306 | } |
306 | 307 | ||
308 | static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = { | ||
309 | [OVS_VXLAN_EXT_GBP] = { .len = sizeof(u32) }, | ||
310 | }; | ||
311 | |||
307 | static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { | 312 | static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { |
308 | [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) }, | 313 | [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) }, |
309 | [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) }, | 314 | [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) }, |
@@ -315,8 +320,9 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] | |||
315 | [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) }, | 320 | [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) }, |
316 | [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) }, | 321 | [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) }, |
317 | [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 }, | 322 | [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 }, |
318 | [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_NESTED }, | 323 | [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_VARIABLE }, |
319 | [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED }, | 324 | [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED, |
325 | .next = ovs_vxlan_ext_key_lens }, | ||
320 | }; | 326 | }; |
321 | 327 | ||
322 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ | 328 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ |
@@ -349,6 +355,13 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { | |||
349 | [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) }, | 355 | [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) }, |
350 | }; | 356 | }; |
351 | 357 | ||
358 | static bool check_attr_len(unsigned int attr_len, unsigned int expected_len) | ||
359 | { | ||
360 | return expected_len == attr_len || | ||
361 | expected_len == OVS_ATTR_NESTED || | ||
362 | expected_len == OVS_ATTR_VARIABLE; | ||
363 | } | ||
364 | |||
352 | static bool is_all_zero(const u8 *fp, size_t size) | 365 | static bool is_all_zero(const u8 *fp, size_t size) |
353 | { | 366 | { |
354 | int i; | 367 | int i; |
@@ -388,7 +401,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, | |||
388 | } | 401 | } |
389 | 402 | ||
390 | expected_len = ovs_key_lens[type].len; | 403 | expected_len = ovs_key_lens[type].len; |
391 | if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) { | 404 | if (!check_attr_len(nla_len(nla), expected_len)) { |
392 | OVS_NLERR(log, "Key %d has unexpected len %d expected %d", | 405 | OVS_NLERR(log, "Key %d has unexpected len %d expected %d", |
393 | type, nla_len(nla), expected_len); | 406 | type, nla_len(nla), expected_len); |
394 | return -EINVAL; | 407 | return -EINVAL; |
@@ -473,29 +486,50 @@ static int genev_tun_opt_from_nlattr(const struct nlattr *a, | |||
473 | return 0; | 486 | return 0; |
474 | } | 487 | } |
475 | 488 | ||
476 | static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = { | 489 | static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr, |
477 | [OVS_VXLAN_EXT_GBP] = { .type = NLA_U32 }, | ||
478 | }; | ||
479 | |||
480 | static int vxlan_tun_opt_from_nlattr(const struct nlattr *a, | ||
481 | struct sw_flow_match *match, bool is_mask, | 490 | struct sw_flow_match *match, bool is_mask, |
482 | bool log) | 491 | bool log) |
483 | { | 492 | { |
484 | struct nlattr *tb[OVS_VXLAN_EXT_MAX+1]; | 493 | struct nlattr *a; |
494 | int rem; | ||
485 | unsigned long opt_key_offset; | 495 | unsigned long opt_key_offset; |
486 | struct vxlan_metadata opts; | 496 | struct vxlan_metadata opts; |
487 | int err; | ||
488 | 497 | ||
489 | BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts)); | 498 | BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts)); |
490 | 499 | ||
491 | err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy); | ||
492 | if (err < 0) | ||
493 | return err; | ||
494 | |||
495 | memset(&opts, 0, sizeof(opts)); | 500 | memset(&opts, 0, sizeof(opts)); |
501 | nla_for_each_nested(a, attr, rem) { | ||
502 | int type = nla_type(a); | ||
496 | 503 | ||
497 | if (tb[OVS_VXLAN_EXT_GBP]) | 504 | if (type > OVS_VXLAN_EXT_MAX) { |
498 | opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]); | 505 | OVS_NLERR(log, "VXLAN extension %d out of range max %d", |
506 | type, OVS_VXLAN_EXT_MAX); | ||
507 | return -EINVAL; | ||
508 | } | ||
509 | |||
510 | if (!check_attr_len(nla_len(a), | ||
511 | ovs_vxlan_ext_key_lens[type].len)) { | ||
512 | OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d", | ||
513 | type, nla_len(a), | ||
514 | ovs_vxlan_ext_key_lens[type].len); | ||
515 | return -EINVAL; | ||
516 | } | ||
517 | |||
518 | switch (type) { | ||
519 | case OVS_VXLAN_EXT_GBP: | ||
520 | opts.gbp = nla_get_u32(a); | ||
521 | break; | ||
522 | default: | ||
523 | OVS_NLERR(log, "Unknown VXLAN extension attribute %d", | ||
524 | type); | ||
525 | return -EINVAL; | ||
526 | } | ||
527 | } | ||
528 | if (rem) { | ||
529 | OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.", | ||
530 | rem); | ||
531 | return -EINVAL; | ||
532 | } | ||
499 | 533 | ||
500 | if (!is_mask) | 534 | if (!is_mask) |
501 | SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false); | 535 | SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false); |
@@ -528,8 +562,8 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, | |||
528 | return -EINVAL; | 562 | return -EINVAL; |
529 | } | 563 | } |
530 | 564 | ||
531 | if (ovs_tunnel_key_lens[type].len != nla_len(a) && | 565 | if (!check_attr_len(nla_len(a), |
532 | ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) { | 566 | ovs_tunnel_key_lens[type].len)) { |
533 | OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d", | 567 | OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d", |
534 | type, nla_len(a), ovs_tunnel_key_lens[type].len); | 568 | type, nla_len(a), ovs_tunnel_key_lens[type].len); |
535 | return -EINVAL; | 569 | return -EINVAL; |
@@ -1052,10 +1086,13 @@ static void nlattr_set(struct nlattr *attr, u8 val, | |||
1052 | 1086 | ||
1053 | /* The nlattr stream should already have been validated */ | 1087 | /* The nlattr stream should already have been validated */ |
1054 | nla_for_each_nested(nla, attr, rem) { | 1088 | nla_for_each_nested(nla, attr, rem) { |
1055 | if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED) | 1089 | if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) { |
1056 | nlattr_set(nla, val, tbl[nla_type(nla)].next); | 1090 | if (tbl[nla_type(nla)].next) |
1057 | else | 1091 | tbl = tbl[nla_type(nla)].next; |
1092 | nlattr_set(nla, val, tbl); | ||
1093 | } else { | ||
1058 | memset(nla_data(nla), val, nla_len(nla)); | 1094 | memset(nla_data(nla), val, nla_len(nla)); |
1095 | } | ||
1059 | } | 1096 | } |
1060 | } | 1097 | } |
1061 | 1098 | ||
@@ -1922,8 +1959,7 @@ static int validate_set(const struct nlattr *a, | |||
1922 | key_len /= 2; | 1959 | key_len /= 2; |
1923 | 1960 | ||
1924 | if (key_type > OVS_KEY_ATTR_MAX || | 1961 | if (key_type > OVS_KEY_ATTR_MAX || |
1925 | (ovs_key_lens[key_type].len != key_len && | 1962 | !check_attr_len(key_len, ovs_key_lens[key_type].len)) |
1926 | ovs_key_lens[key_type].len != OVS_ATTR_NESTED)) | ||
1927 | return -EINVAL; | 1963 | return -EINVAL; |
1928 | 1964 | ||
1929 | if (masked && !validate_masked(nla_data(ovs_key), key_len)) | 1965 | if (masked && !validate_masked(nla_data(ovs_key), key_len)) |
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index d22d8e948d0f..f2ea83ba4763 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c | |||
@@ -57,20 +57,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range) | |||
57 | } | 57 | } |
58 | 58 | ||
59 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, | 59 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, |
60 | const struct sw_flow_mask *mask) | 60 | bool full, const struct sw_flow_mask *mask) |
61 | { | 61 | { |
62 | const long *m = (const long *)((const u8 *)&mask->key + | 62 | int start = full ? 0 : mask->range.start; |
63 | mask->range.start); | 63 | int len = full ? sizeof *dst : range_n_bytes(&mask->range); |
64 | const long *s = (const long *)((const u8 *)src + | 64 | const long *m = (const long *)((const u8 *)&mask->key + start); |
65 | mask->range.start); | 65 | const long *s = (const long *)((const u8 *)src + start); |
66 | long *d = (long *)((u8 *)dst + mask->range.start); | 66 | long *d = (long *)((u8 *)dst + start); |
67 | int i; | 67 | int i; |
68 | 68 | ||
69 | /* The memory outside of the 'mask->range' are not set since | 69 | /* If 'full' is true then all of 'dst' is fully initialized. Otherwise, |
70 | * further operations on 'dst' only uses contents within | 70 | * if 'full' is false the memory outside of the 'mask->range' is left |
71 | * 'mask->range'. | 71 | * uninitialized. This can be used as an optimization when further |
72 | * operations on 'dst' only use contents within 'mask->range'. | ||
72 | */ | 73 | */ |
73 | for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) | 74 | for (i = 0; i < len; i += sizeof(long)) |
74 | *d++ = *s++ & *m++; | 75 | *d++ = *s++ & *m++; |
75 | } | 76 | } |
76 | 77 | ||
@@ -475,7 +476,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti, | |||
475 | u32 hash; | 476 | u32 hash; |
476 | struct sw_flow_key masked_key; | 477 | struct sw_flow_key masked_key; |
477 | 478 | ||
478 | ovs_flow_mask_key(&masked_key, unmasked, mask); | 479 | ovs_flow_mask_key(&masked_key, unmasked, false, mask); |
479 | hash = flow_hash(&masked_key, &mask->range); | 480 | hash = flow_hash(&masked_key, &mask->range); |
480 | head = find_bucket(ti, hash); | 481 | head = find_bucket(ti, hash); |
481 | hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) { | 482 | hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) { |
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 616eda10d955..2dd9900f533d 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h | |||
@@ -86,5 +86,5 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *, | |||
86 | bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); | 86 | bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); |
87 | 87 | ||
88 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, | 88 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, |
89 | const struct sw_flow_mask *mask); | 89 | bool full, const struct sw_flow_mask *mask); |
90 | #endif /* flow_table.h */ | 90 | #endif /* flow_table.h */ |
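
The flow_table.c change above gives ovs_flow_mask_key() a 'full' flag so callers that later read the whole key get a fully initialized destination, while the lookup fast path keeps masking only mask->range. A standalone sketch of that masking loop; mask_key() and struct flow_mask_lite are invented for the example (the real code operates on struct sw_flow_key), and the long-at-a-time aliasing mirrors the kernel loop.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define KEY_SIZE 32	/* multiple of sizeof(long) */

struct flow_mask_lite {
	_Alignas(long) unsigned char key[KEY_SIZE];
	size_t range_start;	/* byte offset, multiple of sizeof(long) */
	size_t range_len;	/* byte length, multiple of sizeof(long) */
};

static void mask_key(unsigned char *dst, const unsigned char *src,
		     bool full, const struct flow_mask_lite *mask)
{
	size_t start = full ? 0 : mask->range_start;
	size_t len = full ? KEY_SIZE : mask->range_len;
	const long *m = (const long *)(mask->key + start);
	const long *s = (const long *)(src + start);
	long *d = (long *)(dst + start);
	size_t i;

	/* With full == false, bytes outside the range stay uninitialized,
	 * which is only safe if later lookups stay inside the range. */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

int main(void)
{
	struct flow_mask_lite mask = { .range_start = 8, .range_len = 16 };
	_Alignas(long) unsigned char src[KEY_SIZE], dst[KEY_SIZE];

	memset(mask.key, 0xff, KEY_SIZE);
	memset(src, 0xab, KEY_SIZE);
	memset(dst, 0, KEY_SIZE);
	mask_key(dst, src, true, &mask);	/* fully initialize dst */
	printf("dst[0]=%#x dst[8]=%#x\n", (unsigned)dst[0], (unsigned)dst[8]);
	return 0;
}
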
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 7b8e39a22387..aa4b15c35884 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -230,6 +230,8 @@ struct packet_skb_cb { | |||
230 | } sa; | 230 | } sa; |
231 | }; | 231 | }; |
232 | 232 | ||
233 | #define vio_le() virtio_legacy_is_little_endian() | ||
234 | |||
233 | #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) | 235 | #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) |
234 | 236 | ||
235 | #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) | 237 | #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) |
@@ -2680,15 +2682,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2680 | goto out_unlock; | 2682 | goto out_unlock; |
2681 | 2683 | ||
2682 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && | 2684 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && |
2683 | (__virtio16_to_cpu(false, vnet_hdr.csum_start) + | 2685 | (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) + |
2684 | __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 > | 2686 | __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 > |
2685 | __virtio16_to_cpu(false, vnet_hdr.hdr_len))) | 2687 | __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len))) |
2686 | vnet_hdr.hdr_len = __cpu_to_virtio16(false, | 2688 | vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(), |
2687 | __virtio16_to_cpu(false, vnet_hdr.csum_start) + | 2689 | __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) + |
2688 | __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2); | 2690 | __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2); |
2689 | 2691 | ||
2690 | err = -EINVAL; | 2692 | err = -EINVAL; |
2691 | if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len) | 2693 | if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len) |
2692 | goto out_unlock; | 2694 | goto out_unlock; |
2693 | 2695 | ||
2694 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 2696 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
@@ -2731,7 +2733,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2731 | hlen = LL_RESERVED_SPACE(dev); | 2733 | hlen = LL_RESERVED_SPACE(dev); |
2732 | tlen = dev->needed_tailroom; | 2734 | tlen = dev->needed_tailroom; |
2733 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, | 2735 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, |
2734 | __virtio16_to_cpu(false, vnet_hdr.hdr_len), | 2736 | __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len), |
2735 | msg->msg_flags & MSG_DONTWAIT, &err); | 2737 | msg->msg_flags & MSG_DONTWAIT, &err); |
2736 | if (skb == NULL) | 2738 | if (skb == NULL) |
2737 | goto out_unlock; | 2739 | goto out_unlock; |
@@ -2778,8 +2780,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2778 | 2780 | ||
2779 | if (po->has_vnet_hdr) { | 2781 | if (po->has_vnet_hdr) { |
2780 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | 2782 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
2781 | u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start); | 2783 | u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start); |
2782 | u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset); | 2784 | u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset); |
2783 | if (!skb_partial_csum_set(skb, s, o)) { | 2785 | if (!skb_partial_csum_set(skb, s, o)) { |
2784 | err = -EINVAL; | 2786 | err = -EINVAL; |
2785 | goto out_free; | 2787 | goto out_free; |
@@ -2787,7 +2789,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2787 | } | 2789 | } |
2788 | 2790 | ||
2789 | skb_shinfo(skb)->gso_size = | 2791 | skb_shinfo(skb)->gso_size = |
2790 | __virtio16_to_cpu(false, vnet_hdr.gso_size); | 2792 | __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size); |
2791 | skb_shinfo(skb)->gso_type = gso_type; | 2793 | skb_shinfo(skb)->gso_type = gso_type; |
2792 | 2794 | ||
2793 | /* Header must be checked, and gso_segs computed. */ | 2795 | /* Header must be checked, and gso_segs computed. */ |
@@ -3161,9 +3163,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
3161 | 3163 | ||
3162 | /* This is a hint as to how much should be linear. */ | 3164 | /* This is a hint as to how much should be linear. */ |
3163 | vnet_hdr.hdr_len = | 3165 | vnet_hdr.hdr_len = |
3164 | __cpu_to_virtio16(false, skb_headlen(skb)); | 3166 | __cpu_to_virtio16(vio_le(), skb_headlen(skb)); |
3165 | vnet_hdr.gso_size = | 3167 | vnet_hdr.gso_size = |
3166 | __cpu_to_virtio16(false, sinfo->gso_size); | 3168 | __cpu_to_virtio16(vio_le(), sinfo->gso_size); |
3167 | if (sinfo->gso_type & SKB_GSO_TCPV4) | 3169 | if (sinfo->gso_type & SKB_GSO_TCPV4) |
3168 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | 3170 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
3169 | else if (sinfo->gso_type & SKB_GSO_TCPV6) | 3171 | else if (sinfo->gso_type & SKB_GSO_TCPV6) |
@@ -3181,9 +3183,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
3181 | 3183 | ||
3182 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 3184 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
3183 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; | 3185 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
3184 | vnet_hdr.csum_start = __cpu_to_virtio16(false, | 3186 | vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(), |
3185 | skb_checksum_start_offset(skb)); | 3187 | skb_checksum_start_offset(skb)); |
3186 | vnet_hdr.csum_offset = __cpu_to_virtio16(false, | 3188 | vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(), |
3187 | skb->csum_offset); | 3189 | skb->csum_offset); |
3188 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { | 3190 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
3189 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; | 3191 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; |
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 715e01e5910a..f23a3b68bba6 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | struct fw_head { | 34 | struct fw_head { |
35 | u32 mask; | 35 | u32 mask; |
36 | bool mask_set; | ||
37 | struct fw_filter __rcu *ht[HTSIZE]; | 36 | struct fw_filter __rcu *ht[HTSIZE]; |
38 | struct rcu_head rcu; | 37 | struct rcu_head rcu; |
39 | }; | 38 | }; |
@@ -84,7 +83,7 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, | |||
84 | } | 83 | } |
85 | } | 84 | } |
86 | } else { | 85 | } else { |
87 | /* old method */ | 86 | /* Old method: classify the packet using its skb mark. */ |
88 | if (id && (TC_H_MAJ(id) == 0 || | 87 | if (id && (TC_H_MAJ(id) == 0 || |
89 | !(TC_H_MAJ(id ^ tp->q->handle)))) { | 88 | !(TC_H_MAJ(id ^ tp->q->handle)))) { |
90 | res->classid = id; | 89 | res->classid = id; |
@@ -114,14 +113,9 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle) | |||
114 | 113 | ||
115 | static int fw_init(struct tcf_proto *tp) | 114 | static int fw_init(struct tcf_proto *tp) |
116 | { | 115 | { |
117 | struct fw_head *head; | 116 | /* We don't allocate fw_head here, because in the old method |
118 | 117 | * we don't need it at all. | |
119 | head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); | 118 | */ |
120 | if (head == NULL) | ||
121 | return -ENOBUFS; | ||
122 | |||
123 | head->mask_set = false; | ||
124 | rcu_assign_pointer(tp->root, head); | ||
125 | return 0; | 119 | return 0; |
126 | } | 120 | } |
127 | 121 | ||
@@ -252,7 +246,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, | |||
252 | int err; | 246 | int err; |
253 | 247 | ||
254 | if (!opt) | 248 | if (!opt) |
255 | return handle ? -EINVAL : 0; | 249 | return handle ? -EINVAL : 0; /* Succeed if it is old method. */ |
256 | 250 | ||
257 | err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); | 251 | err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); |
258 | if (err < 0) | 252 | if (err < 0) |
@@ -302,11 +296,17 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, | |||
302 | if (!handle) | 296 | if (!handle) |
303 | return -EINVAL; | 297 | return -EINVAL; |
304 | 298 | ||
305 | if (!head->mask_set) { | 299 | if (!head) { |
306 | head->mask = 0xFFFFFFFF; | 300 | u32 mask = 0xFFFFFFFF; |
307 | if (tb[TCA_FW_MASK]) | 301 | if (tb[TCA_FW_MASK]) |
308 | head->mask = nla_get_u32(tb[TCA_FW_MASK]); | 302 | mask = nla_get_u32(tb[TCA_FW_MASK]); |
309 | head->mask_set = true; | 303 | |
304 | head = kzalloc(sizeof(*head), GFP_KERNEL); | ||
305 | if (!head) | ||
306 | return -ENOBUFS; | ||
307 | head->mask = mask; | ||
308 | |||
309 | rcu_assign_pointer(tp->root, head); | ||
310 | } | 310 | } |
311 | 311 | ||
312 | f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); | 312 | f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); |
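
The cls_fw.c hunks defer allocation of the classifier head to the first fw_change() call, fixing the mask at allocation time and publishing the fully initialized head with rcu_assign_pointer(). A userspace sketch of that lazy-allocate-then-publish pattern, with an atomic release store standing in for rcu_assign_pointer() and invented names throughout.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fw_head_lite {
	uint32_t mask;
};

static _Atomic(struct fw_head_lite *) root;	/* plays the role of tp->root */

static int fw_change_lite(const uint32_t *opt_mask)
{
	struct fw_head_lite *head = atomic_load(&root);

	if (!head) {
		uint32_t mask = opt_mask ? *opt_mask : 0xFFFFFFFF;

		head = calloc(1, sizeof(*head));
		if (!head)
			return -1;
		head->mask = mask;
		/* publish a fully initialized head to readers */
		atomic_store_explicit(&root, head, memory_order_release);
	}
	printf("classifying with mask %#x\n", (unsigned)head->mask);
	return 0;
}

int main(void)
{
	uint32_t mask = 0xff;

	fw_change_lite(&mask);	/* first change allocates the head */
	fw_change_lite(NULL);	/* later changes reuse it */
	free(atomic_load(&root));
	return 0;
}
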
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 197c3f59ecbf..b00f1f9611d6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc, | |||
1208 | * within this document. | 1208 | * within this document. |
1209 | * | 1209 | * |
1210 | * Our basic strategy is to round-robin transports in priorities | 1210 | * Our basic strategy is to round-robin transports in priorities |
1211 | * according to sctp_state_prio_map[] e.g., if no such | 1211 | * according to sctp_trans_score() e.g., if no such |
1212 | * transport with state SCTP_ACTIVE exists, round-robin through | 1212 | * transport with state SCTP_ACTIVE exists, round-robin through |
1213 | * SCTP_UNKNOWN, etc. You get the picture. | 1213 | * SCTP_UNKNOWN, etc. You get the picture. |
1214 | */ | 1214 | */ |
1215 | static const u8 sctp_trans_state_to_prio_map[] = { | ||
1216 | [SCTP_ACTIVE] = 3, /* best case */ | ||
1217 | [SCTP_UNKNOWN] = 2, | ||
1218 | [SCTP_PF] = 1, | ||
1219 | [SCTP_INACTIVE] = 0, /* worst case */ | ||
1220 | }; | ||
1221 | |||
1222 | static u8 sctp_trans_score(const struct sctp_transport *trans) | 1215 | static u8 sctp_trans_score(const struct sctp_transport *trans) |
1223 | { | 1216 | { |
1224 | return sctp_trans_state_to_prio_map[trans->state]; | 1217 | switch (trans->state) { |
1218 | case SCTP_ACTIVE: | ||
1219 | return 3; /* best case */ | ||
1220 | case SCTP_UNKNOWN: | ||
1221 | return 2; | ||
1222 | case SCTP_PF: | ||
1223 | return 1; | ||
1224 | default: /* case SCTP_INACTIVE */ | ||
1225 | return 0; /* worst case */ | ||
1226 | } | ||
1225 | } | 1227 | } |
1226 | 1228 | ||
1227 | static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, | 1229 | static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index b7143337e4fa..3d9ea9a48289 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -1186,7 +1186,7 @@ static void sctp_v4_del_protocol(void) | |||
1186 | unregister_inetaddr_notifier(&sctp_inetaddr_notifier); | 1186 | unregister_inetaddr_notifier(&sctp_inetaddr_notifier); |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | static int __net_init sctp_net_init(struct net *net) | 1189 | static int __net_init sctp_defaults_init(struct net *net) |
1190 | { | 1190 | { |
1191 | int status; | 1191 | int status; |
1192 | 1192 | ||
@@ -1279,12 +1279,6 @@ static int __net_init sctp_net_init(struct net *net) | |||
1279 | 1279 | ||
1280 | sctp_dbg_objcnt_init(net); | 1280 | sctp_dbg_objcnt_init(net); |
1281 | 1281 | ||
1282 | /* Initialize the control inode/socket for handling OOTB packets. */ | ||
1283 | if ((status = sctp_ctl_sock_init(net))) { | ||
1284 | pr_err("Failed to initialize the SCTP control sock\n"); | ||
1285 | goto err_ctl_sock_init; | ||
1286 | } | ||
1287 | |||
1288 | /* Initialize the local address list. */ | 1282 | /* Initialize the local address list. */ |
1289 | INIT_LIST_HEAD(&net->sctp.local_addr_list); | 1283 | INIT_LIST_HEAD(&net->sctp.local_addr_list); |
1290 | spin_lock_init(&net->sctp.local_addr_lock); | 1284 | spin_lock_init(&net->sctp.local_addr_lock); |
@@ -1300,9 +1294,6 @@ static int __net_init sctp_net_init(struct net *net) | |||
1300 | 1294 | ||
1301 | return 0; | 1295 | return 0; |
1302 | 1296 | ||
1303 | err_ctl_sock_init: | ||
1304 | sctp_dbg_objcnt_exit(net); | ||
1305 | sctp_proc_exit(net); | ||
1306 | err_init_proc: | 1297 | err_init_proc: |
1307 | cleanup_sctp_mibs(net); | 1298 | cleanup_sctp_mibs(net); |
1308 | err_init_mibs: | 1299 | err_init_mibs: |
@@ -1311,15 +1302,12 @@ err_sysctl_register: | |||
1311 | return status; | 1302 | return status; |
1312 | } | 1303 | } |
1313 | 1304 | ||
1314 | static void __net_exit sctp_net_exit(struct net *net) | 1305 | static void __net_exit sctp_defaults_exit(struct net *net) |
1315 | { | 1306 | { |
1316 | /* Free the local address list */ | 1307 | /* Free the local address list */ |
1317 | sctp_free_addr_wq(net); | 1308 | sctp_free_addr_wq(net); |
1318 | sctp_free_local_addr_list(net); | 1309 | sctp_free_local_addr_list(net); |
1319 | 1310 | ||
1320 | /* Free the control endpoint. */ | ||
1321 | inet_ctl_sock_destroy(net->sctp.ctl_sock); | ||
1322 | |||
1323 | sctp_dbg_objcnt_exit(net); | 1311 | sctp_dbg_objcnt_exit(net); |
1324 | 1312 | ||
1325 | sctp_proc_exit(net); | 1313 | sctp_proc_exit(net); |
@@ -1327,9 +1315,32 @@ static void __net_exit sctp_net_exit(struct net *net) | |||
1327 | sctp_sysctl_net_unregister(net); | 1315 | sctp_sysctl_net_unregister(net); |
1328 | } | 1316 | } |
1329 | 1317 | ||
1330 | static struct pernet_operations sctp_net_ops = { | 1318 | static struct pernet_operations sctp_defaults_ops = { |
1331 | .init = sctp_net_init, | 1319 | .init = sctp_defaults_init, |
1332 | .exit = sctp_net_exit, | 1320 | .exit = sctp_defaults_exit, |
1321 | }; | ||
1322 | |||
1323 | static int __net_init sctp_ctrlsock_init(struct net *net) | ||
1324 | { | ||
1325 | int status; | ||
1326 | |||
1327 | /* Initialize the control inode/socket for handling OOTB packets. */ | ||
1328 | status = sctp_ctl_sock_init(net); | ||
1329 | if (status) | ||
1330 | pr_err("Failed to initialize the SCTP control sock\n"); | ||
1331 | |||
1332 | return status; | ||
1333 | } | ||
1334 | |||
1335 | static void __net_init sctp_ctrlsock_exit(struct net *net) | ||
1336 | { | ||
1337 | /* Free the control endpoint. */ | ||
1338 | inet_ctl_sock_destroy(net->sctp.ctl_sock); | ||
1339 | } | ||
1340 | |||
1341 | static struct pernet_operations sctp_ctrlsock_ops = { | ||
1342 | .init = sctp_ctrlsock_init, | ||
1343 | .exit = sctp_ctrlsock_exit, | ||
1333 | }; | 1344 | }; |
1334 | 1345 | ||
1335 | /* Initialize the universe into something sensible. */ | 1346 | /* Initialize the universe into something sensible. */ |
@@ -1462,8 +1473,11 @@ static __init int sctp_init(void) | |||
1462 | sctp_v4_pf_init(); | 1473 | sctp_v4_pf_init(); |
1463 | sctp_v6_pf_init(); | 1474 | sctp_v6_pf_init(); |
1464 | 1475 | ||
1465 | status = sctp_v4_protosw_init(); | 1476 | status = register_pernet_subsys(&sctp_defaults_ops); |
1477 | if (status) | ||
1478 | goto err_register_defaults; | ||
1466 | 1479 | ||
1480 | status = sctp_v4_protosw_init(); | ||
1467 | if (status) | 1481 | if (status) |
1468 | goto err_protosw_init; | 1482 | goto err_protosw_init; |
1469 | 1483 | ||
@@ -1471,9 +1485,9 @@ static __init int sctp_init(void) | |||
1471 | if (status) | 1485 | if (status) |
1472 | goto err_v6_protosw_init; | 1486 | goto err_v6_protosw_init; |
1473 | 1487 | ||
1474 | status = register_pernet_subsys(&sctp_net_ops); | 1488 | status = register_pernet_subsys(&sctp_ctrlsock_ops); |
1475 | if (status) | 1489 | if (status) |
1476 | goto err_register_pernet_subsys; | 1490 | goto err_register_ctrlsock; |
1477 | 1491 | ||
1478 | status = sctp_v4_add_protocol(); | 1492 | status = sctp_v4_add_protocol(); |
1479 | if (status) | 1493 | if (status) |
@@ -1489,12 +1503,14 @@ out: | |||
1489 | err_v6_add_protocol: | 1503 | err_v6_add_protocol: |
1490 | sctp_v4_del_protocol(); | 1504 | sctp_v4_del_protocol(); |
1491 | err_add_protocol: | 1505 | err_add_protocol: |
1492 | unregister_pernet_subsys(&sctp_net_ops); | 1506 | unregister_pernet_subsys(&sctp_ctrlsock_ops); |
1493 | err_register_pernet_subsys: | 1507 | err_register_ctrlsock: |
1494 | sctp_v6_protosw_exit(); | 1508 | sctp_v6_protosw_exit(); |
1495 | err_v6_protosw_init: | 1509 | err_v6_protosw_init: |
1496 | sctp_v4_protosw_exit(); | 1510 | sctp_v4_protosw_exit(); |
1497 | err_protosw_init: | 1511 | err_protosw_init: |
1512 | unregister_pernet_subsys(&sctp_defaults_ops); | ||
1513 | err_register_defaults: | ||
1498 | sctp_v4_pf_exit(); | 1514 | sctp_v4_pf_exit(); |
1499 | sctp_v6_pf_exit(); | 1515 | sctp_v6_pf_exit(); |
1500 | sctp_sysctl_unregister(); | 1516 | sctp_sysctl_unregister(); |
@@ -1527,12 +1543,14 @@ static __exit void sctp_exit(void) | |||
1527 | sctp_v6_del_protocol(); | 1543 | sctp_v6_del_protocol(); |
1528 | sctp_v4_del_protocol(); | 1544 | sctp_v4_del_protocol(); |
1529 | 1545 | ||
1530 | unregister_pernet_subsys(&sctp_net_ops); | 1546 | unregister_pernet_subsys(&sctp_ctrlsock_ops); |
1531 | 1547 | ||
1532 | /* Free protosw registrations */ | 1548 | /* Free protosw registrations */ |
1533 | sctp_v6_protosw_exit(); | 1549 | sctp_v6_protosw_exit(); |
1534 | sctp_v4_protosw_exit(); | 1550 | sctp_v4_protosw_exit(); |
1535 | 1551 | ||
1552 | unregister_pernet_subsys(&sctp_defaults_ops); | ||
1553 | |||
1536 | /* Unregister with socket layer. */ | 1554 | /* Unregister with socket layer. */ |
1537 | sctp_v6_pf_exit(); | 1555 | sctp_v6_pf_exit(); |
1538 | sctp_v4_pf_exit(); | 1556 | sctp_v4_pf_exit(); |
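The protocol.c change splits the single pernet_operations into a "defaults" set and a "control socket" set, so that in each network namespace the control socket is created only after everything it depends on, and is torn down first on exit. A rough sketch of that two-stage registration, with hypothetical demo_* hooks standing in for the real SCTP ones:

#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init demo_defaults_init(struct net *net) { return 0; }
static void __net_exit demo_defaults_exit(struct net *net) { }
static int __net_init demo_ctrlsock_init(struct net *net) { return 0; }
static void __net_exit demo_ctrlsock_exit(struct net *net) { }

static struct pernet_operations demo_defaults_ops = {
    .init = demo_defaults_init,
    .exit = demo_defaults_exit,
};

static struct pernet_operations demo_ctrlsock_ops = {
    .init = demo_ctrlsock_init,
    .exit = demo_ctrlsock_exit,
};

static int __init demo_init(void)
{
    int err;

    err = register_pernet_subsys(&demo_defaults_ops);
    if (err)
        return err;

    /* ... protosw registration would sit between the two ... */

    err = register_pernet_subsys(&demo_ctrlsock_ops);
    if (err)
        unregister_pernet_subsys(&demo_defaults_ops);
    return err;
}

static void __exit demo_exit(void)
{
    /* reverse order: control socket first, defaults last */
    unregister_pernet_subsys(&demo_ctrlsock_ops);
    unregister_pernet_subsys(&demo_defaults_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

register_pernet_subsys() runs the init hook for every already-existing namespace at registration time, so ordering the two registrations also orders the per-namespace setup.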
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 35df1266bf07..6098d4c42fa9 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer) | |||
244 | int error; | 244 | int error; |
245 | struct sctp_transport *transport = (struct sctp_transport *) peer; | 245 | struct sctp_transport *transport = (struct sctp_transport *) peer; |
246 | struct sctp_association *asoc = transport->asoc; | 246 | struct sctp_association *asoc = transport->asoc; |
247 | struct net *net = sock_net(asoc->base.sk); | 247 | struct sock *sk = asoc->base.sk; |
248 | struct net *net = sock_net(sk); | ||
248 | 249 | ||
249 | /* Check whether a task is in the sock. */ | 250 | /* Check whether a task is in the sock. */ |
250 | 251 | ||
251 | bh_lock_sock(asoc->base.sk); | 252 | bh_lock_sock(sk); |
252 | if (sock_owned_by_user(asoc->base.sk)) { | 253 | if (sock_owned_by_user(sk)) { |
253 | pr_debug("%s: sock is busy\n", __func__); | 254 | pr_debug("%s: sock is busy\n", __func__); |
254 | 255 | ||
255 | /* Try again later. */ | 256 | /* Try again later. */ |
@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer) | |||
272 | transport, GFP_ATOMIC); | 273 | transport, GFP_ATOMIC); |
273 | 274 | ||
274 | if (error) | 275 | if (error) |
275 | asoc->base.sk->sk_err = -error; | 276 | sk->sk_err = -error; |
276 | 277 | ||
277 | out_unlock: | 278 | out_unlock: |
278 | bh_unlock_sock(asoc->base.sk); | 279 | bh_unlock_sock(sk); |
279 | sctp_transport_put(transport); | 280 | sctp_transport_put(transport); |
280 | } | 281 | } |
281 | 282 | ||
@@ -285,11 +286,12 @@ out_unlock: | |||
285 | static void sctp_generate_timeout_event(struct sctp_association *asoc, | 286 | static void sctp_generate_timeout_event(struct sctp_association *asoc, |
286 | sctp_event_timeout_t timeout_type) | 287 | sctp_event_timeout_t timeout_type) |
287 | { | 288 | { |
288 | struct net *net = sock_net(asoc->base.sk); | 289 | struct sock *sk = asoc->base.sk; |
290 | struct net *net = sock_net(sk); | ||
289 | int error = 0; | 291 | int error = 0; |
290 | 292 | ||
291 | bh_lock_sock(asoc->base.sk); | 293 | bh_lock_sock(sk); |
292 | if (sock_owned_by_user(asoc->base.sk)) { | 294 | if (sock_owned_by_user(sk)) { |
293 | pr_debug("%s: sock is busy: timer %d\n", __func__, | 295 | pr_debug("%s: sock is busy: timer %d\n", __func__, |
294 | timeout_type); | 296 | timeout_type); |
295 | 297 | ||
@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc, | |||
312 | (void *)timeout_type, GFP_ATOMIC); | 314 | (void *)timeout_type, GFP_ATOMIC); |
313 | 315 | ||
314 | if (error) | 316 | if (error) |
315 | asoc->base.sk->sk_err = -error; | 317 | sk->sk_err = -error; |
316 | 318 | ||
317 | out_unlock: | 319 | out_unlock: |
318 | bh_unlock_sock(asoc->base.sk); | 320 | bh_unlock_sock(sk); |
319 | sctp_association_put(asoc); | 321 | sctp_association_put(asoc); |
320 | } | 322 | } |
321 | 323 | ||
@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data) | |||
365 | int error = 0; | 367 | int error = 0; |
366 | struct sctp_transport *transport = (struct sctp_transport *) data; | 368 | struct sctp_transport *transport = (struct sctp_transport *) data; |
367 | struct sctp_association *asoc = transport->asoc; | 369 | struct sctp_association *asoc = transport->asoc; |
368 | struct net *net = sock_net(asoc->base.sk); | 370 | struct sock *sk = asoc->base.sk; |
371 | struct net *net = sock_net(sk); | ||
369 | 372 | ||
370 | bh_lock_sock(asoc->base.sk); | 373 | bh_lock_sock(sk); |
371 | if (sock_owned_by_user(asoc->base.sk)) { | 374 | if (sock_owned_by_user(sk)) { |
372 | pr_debug("%s: sock is busy\n", __func__); | 375 | pr_debug("%s: sock is busy\n", __func__); |
373 | 376 | ||
374 | /* Try again later. */ | 377 | /* Try again later. */ |
@@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data) | |||
388 | asoc->state, asoc->ep, asoc, | 391 | asoc->state, asoc->ep, asoc, |
389 | transport, GFP_ATOMIC); | 392 | transport, GFP_ATOMIC); |
390 | 393 | ||
391 | if (error) | 394 | if (error) |
392 | asoc->base.sk->sk_err = -error; | 395 | sk->sk_err = -error; |
393 | 396 | ||
394 | out_unlock: | 397 | out_unlock: |
395 | bh_unlock_sock(asoc->base.sk); | 398 | bh_unlock_sock(sk); |
396 | sctp_transport_put(transport); | 399 | sctp_transport_put(transport); |
397 | } | 400 | } |
398 | 401 | ||
@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data) | |||
403 | { | 406 | { |
404 | struct sctp_transport *transport = (struct sctp_transport *) data; | 407 | struct sctp_transport *transport = (struct sctp_transport *) data; |
405 | struct sctp_association *asoc = transport->asoc; | 408 | struct sctp_association *asoc = transport->asoc; |
406 | struct net *net = sock_net(asoc->base.sk); | 409 | struct sock *sk = asoc->base.sk; |
410 | struct net *net = sock_net(sk); | ||
407 | 411 | ||
408 | bh_lock_sock(asoc->base.sk); | 412 | bh_lock_sock(sk); |
409 | if (sock_owned_by_user(asoc->base.sk)) { | 413 | if (sock_owned_by_user(sk)) { |
410 | pr_debug("%s: sock is busy\n", __func__); | 414 | pr_debug("%s: sock is busy\n", __func__); |
411 | 415 | ||
412 | /* Try again later. */ | 416 | /* Try again later. */ |
@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data) | |||
427 | asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); | 431 | asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); |
428 | 432 | ||
429 | out_unlock: | 433 | out_unlock: |
430 | bh_unlock_sock(asoc->base.sk); | 434 | bh_unlock_sock(sk); |
431 | sctp_association_put(asoc); | 435 | sctp_association_put(asoc); |
432 | } | 436 | } |
433 | 437 | ||
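The sm_sideeffect.c hunks are a mechanical cleanup: each timer handler reads asoc->base.sk once into a local sk and then locks, tests and unlocks through that same pointer. A sketch of the resulting shape (demo_assoc is a stand-in type, not the real sctp_association):

#include <net/sock.h>

struct demo_assoc {
    struct {
        struct sock *sk;
    } base;
};

/* Illustrative timer-callback shape: take the sock pointer once, then
 * lock, test and unlock through that same local variable. */
static void demo_timeout(struct demo_assoc *asoc)
{
    struct sock *sk = asoc->base.sk;

    bh_lock_sock(sk);
    if (sock_owned_by_user(sk))
        goto out_unlock;    /* socket busy, try again later */

    /* ... run the side-effect machinery, report errors via sk->sk_err ... */

out_unlock:
    bh_unlock_sock(sk);
}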
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index b140c092d226..f14f24ee9983 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -297,7 +297,7 @@ static int rpc_complete_task(struct rpc_task *task) | |||
297 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); | 297 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); |
298 | ret = atomic_dec_and_test(&task->tk_count); | 298 | ret = atomic_dec_and_test(&task->tk_count); |
299 | if (waitqueue_active(wq)) | 299 | if (waitqueue_active(wq)) |
300 | __wake_up_locked_key(wq, TASK_NORMAL, 1, &k); | 300 | __wake_up_locked_key(wq, TASK_NORMAL, &k); |
301 | spin_unlock_irqrestore(&wq->lock, flags); | 301 | spin_unlock_irqrestore(&wq->lock, flags); |
302 | return ret; | 302 | return ret; |
303 | } | 303 | } |
@@ -1092,14 +1092,10 @@ void | |||
1092 | rpc_destroy_mempool(void) | 1092 | rpc_destroy_mempool(void) |
1093 | { | 1093 | { |
1094 | rpciod_stop(); | 1094 | rpciod_stop(); |
1095 | if (rpc_buffer_mempool) | 1095 | mempool_destroy(rpc_buffer_mempool); |
1096 | mempool_destroy(rpc_buffer_mempool); | 1096 | mempool_destroy(rpc_task_mempool); |
1097 | if (rpc_task_mempool) | 1097 | kmem_cache_destroy(rpc_task_slabp); |
1098 | mempool_destroy(rpc_task_mempool); | 1098 | kmem_cache_destroy(rpc_buffer_slabp); |
1099 | if (rpc_task_slabp) | ||
1100 | kmem_cache_destroy(rpc_task_slabp); | ||
1101 | if (rpc_buffer_slabp) | ||
1102 | kmem_cache_destroy(rpc_buffer_slabp); | ||
1103 | rpc_destroy_wait_queue(&delay_queue); | 1099 | rpc_destroy_wait_queue(&delay_queue); |
1104 | } | 1100 | } |
1105 | 1101 | ||
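The sched.c cleanup relies on kmem_cache_destroy() and mempool_destroy() being no-ops when handed NULL, which lets a teardown path run unconditionally even if setup only got part-way. A small sketch of that style (demo_* names are placeholders):

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *demo_slab;
static mempool_t *demo_pool;

static void demo_destroy(void)
{
    /* Both helpers tolerate NULL, so no "if (ptr)" guards are needed
     * even when initialisation failed part-way through. */
    mempool_destroy(demo_pool);
    kmem_cache_destroy(demo_slab);
    demo_pool = NULL;
    demo_slab = NULL;
}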
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index ab5dd621ae0c..2e98f4a243e5 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -614,6 +614,7 @@ static void xprt_autoclose(struct work_struct *work) | |||
614 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); | 614 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); |
615 | xprt->ops->close(xprt); | 615 | xprt->ops->close(xprt); |
616 | xprt_release_write(xprt, NULL); | 616 | xprt_release_write(xprt, NULL); |
617 | wake_up_bit(&xprt->state, XPRT_LOCKED); | ||
617 | } | 618 | } |
618 | 619 | ||
619 | /** | 620 | /** |
@@ -723,6 +724,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) | |||
723 | xprt->ops->release_xprt(xprt, NULL); | 724 | xprt->ops->release_xprt(xprt, NULL); |
724 | out: | 725 | out: |
725 | spin_unlock_bh(&xprt->transport_lock); | 726 | spin_unlock_bh(&xprt->transport_lock); |
727 | wake_up_bit(&xprt->state, XPRT_LOCKED); | ||
726 | } | 728 | } |
727 | 729 | ||
728 | /** | 730 | /** |
@@ -1394,6 +1396,10 @@ out: | |||
1394 | static void xprt_destroy(struct rpc_xprt *xprt) | 1396 | static void xprt_destroy(struct rpc_xprt *xprt) |
1395 | { | 1397 | { |
1396 | dprintk("RPC: destroying transport %p\n", xprt); | 1398 | dprintk("RPC: destroying transport %p\n", xprt); |
1399 | |||
1400 | /* Exclude transport connect/disconnect handlers */ | ||
1401 | wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); | ||
1402 | |||
1397 | del_timer_sync(&xprt->timer); | 1403 | del_timer_sync(&xprt->timer); |
1398 | 1404 | ||
1399 | rpc_xprt_debugfs_unregister(xprt); | 1405 | rpc_xprt_debugfs_unregister(xprt); |
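The xprt.c change makes xprt_destroy() take the XPRT_LOCKED bit via wait_on_bit_lock() before tearing the transport down, and has the paths that release the lock issue a wake_up_bit(), so a connect or autoclose handler can no longer run concurrently with destruction. The generic bit-lock pattern looks roughly like this (the state word and bit number are invented for the sketch):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/bitops.h>

#define DEMO_LOCKED 0   /* bit number inside the state word */

static unsigned long demo_state;

static void demo_exclusive_section(void)
{
    /* Sleeps until the bit can be taken; an uninterruptible wait
     * cannot be interrupted, so the return value is uninteresting. */
    if (wait_on_bit_lock(&demo_state, DEMO_LOCKED, TASK_UNINTERRUPTIBLE))
        return;

    /* ... work that must exclude other holders of the bit ... */

    clear_bit(DEMO_LOCKED, &demo_state);
    smp_mb__after_atomic();
    wake_up_bit(&demo_state, DEMO_LOCKED);
}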
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index cb25c89da623..f1e8dafbd507 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c | |||
@@ -39,25 +39,6 @@ static int | |||
39 | fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | 39 | fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, |
40 | struct rpcrdma_create_data_internal *cdata) | 40 | struct rpcrdma_create_data_internal *cdata) |
41 | { | 41 | { |
42 | struct ib_device_attr *devattr = &ia->ri_devattr; | ||
43 | struct ib_mr *mr; | ||
44 | |||
45 | /* Obtain an lkey to use for the regbufs, which are | ||
46 | * protected from remote access. | ||
47 | */ | ||
48 | if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { | ||
49 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
50 | } else { | ||
51 | mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE); | ||
52 | if (IS_ERR(mr)) { | ||
53 | pr_err("%s: ib_get_dma_mr for failed with %lX\n", | ||
54 | __func__, PTR_ERR(mr)); | ||
55 | return -ENOMEM; | ||
56 | } | ||
57 | ia->ri_dma_lkey = ia->ri_dma_mr->lkey; | ||
58 | ia->ri_dma_mr = mr; | ||
59 | } | ||
60 | |||
61 | return 0; | 42 | return 0; |
62 | } | 43 | } |
63 | 44 | ||
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index d6653f5d0830..5318951b3b53 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | |||
189 | struct ib_device_attr *devattr = &ia->ri_devattr; | 189 | struct ib_device_attr *devattr = &ia->ri_devattr; |
190 | int depth, delta; | 190 | int depth, delta; |
191 | 191 | ||
192 | /* Obtain an lkey to use for the regbufs, which are | ||
193 | * protected from remote access. | ||
194 | */ | ||
195 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
196 | |||
197 | ia->ri_max_frmr_depth = | 192 | ia->ri_max_frmr_depth = |
198 | min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, | 193 | min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, |
199 | devattr->max_fast_reg_page_list_len); | 194 | devattr->max_fast_reg_page_list_len); |
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c index 72cf8b15bbb4..617b76f22154 100644 --- a/net/sunrpc/xprtrdma/physical_ops.c +++ b/net/sunrpc/xprtrdma/physical_ops.c | |||
@@ -23,7 +23,6 @@ static int | |||
23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | 23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, |
24 | struct rpcrdma_create_data_internal *cdata) | 24 | struct rpcrdma_create_data_internal *cdata) |
25 | { | 25 | { |
26 | struct ib_device_attr *devattr = &ia->ri_devattr; | ||
27 | struct ib_mr *mr; | 26 | struct ib_mr *mr; |
28 | 27 | ||
29 | /* Obtain an rkey to use for RPC data payloads. | 28 | /* Obtain an rkey to use for RPC data payloads. |
@@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | |||
37 | __func__, PTR_ERR(mr)); | 36 | __func__, PTR_ERR(mr)); |
38 | return -ENOMEM; | 37 | return -ENOMEM; |
39 | } | 38 | } |
40 | ia->ri_dma_mr = mr; | ||
41 | |||
42 | /* Obtain an lkey to use for regbufs. | ||
43 | */ | ||
44 | if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) | ||
45 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
46 | else | ||
47 | ia->ri_dma_lkey = ia->ri_dma_mr->lkey; | ||
48 | 39 | ||
40 | ia->ri_dma_mr = mr; | ||
49 | return 0; | 41 | return 0; |
50 | } | 42 | } |
51 | 43 | ||
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index cb5174284074..f0c3ff67ca98 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | |||
@@ -136,7 +136,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt, | |||
136 | ctxt->direction = DMA_FROM_DEVICE; | 136 | ctxt->direction = DMA_FROM_DEVICE; |
137 | ctxt->read_hdr = head; | 137 | ctxt->read_hdr = head; |
138 | pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd); | 138 | pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd); |
139 | read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); | 139 | read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, |
140 | rs_length); | ||
140 | 141 | ||
141 | for (pno = 0; pno < pages_needed; pno++) { | 142 | for (pno = 0; pno < pages_needed; pno++) { |
142 | int len = min_t(int, rs_length, PAGE_SIZE - pg_off); | 143 | int len = min_t(int, rs_length, PAGE_SIZE - pg_off); |
@@ -235,7 +236,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, | |||
235 | ctxt->direction = DMA_FROM_DEVICE; | 236 | ctxt->direction = DMA_FROM_DEVICE; |
236 | ctxt->frmr = frmr; | 237 | ctxt->frmr = frmr; |
237 | pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len); | 238 | pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len); |
238 | read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); | 239 | read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, |
240 | rs_length); | ||
239 | 241 | ||
240 | frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]); | 242 | frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]); |
241 | frmr->direction = DMA_FROM_DEVICE; | 243 | frmr->direction = DMA_FROM_DEVICE; |
@@ -531,7 +533,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp, | |||
531 | rqstp->rq_arg.page_base = head->arg.page_base; | 533 | rqstp->rq_arg.page_base = head->arg.page_base; |
532 | 534 | ||
533 | /* rq_respages starts after the last arg page */ | 535 | /* rq_respages starts after the last arg page */ |
534 | rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; | 536 | rqstp->rq_respages = &rqstp->rq_pages[page_no]; |
535 | rqstp->rq_next_page = rqstp->rq_respages + 1; | 537 | rqstp->rq_next_page = rqstp->rq_respages + 1; |
536 | 538 | ||
537 | /* Rebuild rq_arg head and tail. */ | 539 | /* Rebuild rq_arg head and tail. */ |
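The two rdma_read_chunk_*() fixes cap the read length by the space actually left in the page list once the starting page offset is accounted for: with 4 KiB pages, two pages and a 512-byte offset, the usable capacity is 2 * 4096 - 512 = 7680 bytes, not 8192. A tiny standalone illustration of the corrected calculation:

#include <stdio.h>

#define PAGE_SHIFT  12
#define MIN(a, b)   ((a) < (b) ? (a) : (b))

int main(void)
{
    int pages_needed = 2, page_offset = 512, rs_length = 10000;
    int read = MIN((pages_needed << PAGE_SHIFT) - page_offset, rs_length);

    printf("read = %d bytes\n", read);  /* 7680, not 8192 */
    return 0;
}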
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 64443eb754ad..41e452bc580c 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -270,8 +270,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt) | |||
270 | 270 | ||
271 | xprt_clear_connected(xprt); | 271 | xprt_clear_connected(xprt); |
272 | 272 | ||
273 | rpcrdma_buffer_destroy(&r_xprt->rx_buf); | ||
274 | rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); | 273 | rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); |
274 | rpcrdma_buffer_destroy(&r_xprt->rx_buf); | ||
275 | rpcrdma_ia_close(&r_xprt->rx_ia); | 275 | rpcrdma_ia_close(&r_xprt->rx_ia); |
276 | 276 | ||
277 | xprt_rdma_free_addresses(xprt); | 277 | xprt_rdma_free_addresses(xprt); |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 682996779970..5502d4dade74 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -543,11 +543,8 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) | |||
543 | } | 543 | } |
544 | 544 | ||
545 | if (memreg == RPCRDMA_FRMR) { | 545 | if (memreg == RPCRDMA_FRMR) { |
546 | /* Requires both frmr reg and local dma lkey */ | 546 | if (!(devattr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) || |
547 | if (((devattr->device_cap_flags & | 547 | (devattr->max_fast_reg_page_list_len == 0)) { |
548 | (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) != | ||
549 | (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) || | ||
550 | (devattr->max_fast_reg_page_list_len == 0)) { | ||
551 | dprintk("RPC: %s: FRMR registration " | 548 | dprintk("RPC: %s: FRMR registration " |
552 | "not supported by HCA\n", __func__); | 549 | "not supported by HCA\n", __func__); |
553 | memreg = RPCRDMA_MTHCAFMR; | 550 | memreg = RPCRDMA_MTHCAFMR; |
@@ -557,6 +554,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) | |||
557 | if (!ia->ri_device->alloc_fmr) { | 554 | if (!ia->ri_device->alloc_fmr) { |
558 | dprintk("RPC: %s: MTHCAFMR registration " | 555 | dprintk("RPC: %s: MTHCAFMR registration " |
559 | "not supported by HCA\n", __func__); | 556 | "not supported by HCA\n", __func__); |
557 | rc = -EINVAL; | ||
560 | goto out3; | 558 | goto out3; |
561 | } | 559 | } |
562 | } | 560 | } |
@@ -755,19 +753,22 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) | |||
755 | 753 | ||
756 | cancel_delayed_work_sync(&ep->rep_connect_worker); | 754 | cancel_delayed_work_sync(&ep->rep_connect_worker); |
757 | 755 | ||
758 | if (ia->ri_id->qp) { | 756 | if (ia->ri_id->qp) |
759 | rpcrdma_ep_disconnect(ep, ia); | 757 | rpcrdma_ep_disconnect(ep, ia); |
758 | |||
759 | rpcrdma_clean_cq(ep->rep_attr.recv_cq); | ||
760 | rpcrdma_clean_cq(ep->rep_attr.send_cq); | ||
761 | |||
762 | if (ia->ri_id->qp) { | ||
760 | rdma_destroy_qp(ia->ri_id); | 763 | rdma_destroy_qp(ia->ri_id); |
761 | ia->ri_id->qp = NULL; | 764 | ia->ri_id->qp = NULL; |
762 | } | 765 | } |
763 | 766 | ||
764 | rpcrdma_clean_cq(ep->rep_attr.recv_cq); | ||
765 | rc = ib_destroy_cq(ep->rep_attr.recv_cq); | 767 | rc = ib_destroy_cq(ep->rep_attr.recv_cq); |
766 | if (rc) | 768 | if (rc) |
767 | dprintk("RPC: %s: ib_destroy_cq returned %i\n", | 769 | dprintk("RPC: %s: ib_destroy_cq returned %i\n", |
768 | __func__, rc); | 770 | __func__, rc); |
769 | 771 | ||
770 | rpcrdma_clean_cq(ep->rep_attr.send_cq); | ||
771 | rc = ib_destroy_cq(ep->rep_attr.send_cq); | 772 | rc = ib_destroy_cq(ep->rep_attr.send_cq); |
772 | if (rc) | 773 | if (rc) |
773 | dprintk("RPC: %s: ib_destroy_cq returned %i\n", | 774 | dprintk("RPC: %s: ib_destroy_cq returned %i\n", |
@@ -1252,7 +1253,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) | |||
1252 | goto out_free; | 1253 | goto out_free; |
1253 | 1254 | ||
1254 | iov->length = size; | 1255 | iov->length = size; |
1255 | iov->lkey = ia->ri_dma_lkey; | 1256 | iov->lkey = ia->ri_pd->local_dma_lkey; |
1256 | rb->rg_size = size; | 1257 | rb->rg_size = size; |
1257 | rb->rg_owner = NULL; | 1258 | rb->rg_owner = NULL; |
1258 | return rb; | 1259 | return rb; |
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 02512221b8bc..c09414e6f91b 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -65,7 +65,6 @@ struct rpcrdma_ia { | |||
65 | struct rdma_cm_id *ri_id; | 65 | struct rdma_cm_id *ri_id; |
66 | struct ib_pd *ri_pd; | 66 | struct ib_pd *ri_pd; |
67 | struct ib_mr *ri_dma_mr; | 67 | struct ib_mr *ri_dma_mr; |
68 | u32 ri_dma_lkey; | ||
69 | struct completion ri_done; | 68 | struct completion ri_done; |
70 | int ri_async_rc; | 69 | int ri_async_rc; |
71 | unsigned int ri_max_frmr_depth; | 70 | unsigned int ri_max_frmr_depth; |
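Across fmr_ops.c, frwr_ops.c, physical_ops.c and verbs.c the private ri_dma_lkey copy goes away; regbufs now take the lkey straight from the protection domain via ia->ri_pd->local_dma_lkey. A rough sketch of filling a local SGE that way, assuming a PD obtained from ib_alloc_pd() whose local_dma_lkey the core has populated:

#include <rdma/ib_verbs.h>

/* dma_addr/len are assumed to come from a prior ib_dma_map_single(). */
static void demo_fill_sge(struct ib_pd *pd, struct ib_sge *sge,
                          u64 dma_addr, u32 len)
{
    sge->addr   = dma_addr;
    sge->length = len;
    sge->lkey   = pd->local_dma_lkey;   /* no private lkey copy kept */
}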
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7be90bc1a7c2..1a85e0ed0b48 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -777,7 +777,6 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt) | |||
777 | xs_sock_reset_connection_flags(xprt); | 777 | xs_sock_reset_connection_flags(xprt); |
778 | /* Mark transport as closed and wake up all pending tasks */ | 778 | /* Mark transport as closed and wake up all pending tasks */ |
779 | xprt_disconnect_done(xprt); | 779 | xprt_disconnect_done(xprt); |
780 | xprt_force_disconnect(xprt); | ||
781 | } | 780 | } |
782 | 781 | ||
783 | /** | 782 | /** |
@@ -881,8 +880,11 @@ static void xs_xprt_free(struct rpc_xprt *xprt) | |||
881 | */ | 880 | */ |
882 | static void xs_destroy(struct rpc_xprt *xprt) | 881 | static void xs_destroy(struct rpc_xprt *xprt) |
883 | { | 882 | { |
883 | struct sock_xprt *transport = container_of(xprt, | ||
884 | struct sock_xprt, xprt); | ||
884 | dprintk("RPC: xs_destroy xprt %p\n", xprt); | 885 | dprintk("RPC: xs_destroy xprt %p\n", xprt); |
885 | 886 | ||
887 | cancel_delayed_work_sync(&transport->connect_worker); | ||
886 | xs_close(xprt); | 888 | xs_close(xprt); |
887 | xs_xprt_free(xprt); | 889 | xs_xprt_free(xprt); |
888 | module_put(THIS_MODULE); | 890 | module_put(THIS_MODULE); |
@@ -1435,6 +1437,7 @@ out: | |||
1435 | static void xs_tcp_state_change(struct sock *sk) | 1437 | static void xs_tcp_state_change(struct sock *sk) |
1436 | { | 1438 | { |
1437 | struct rpc_xprt *xprt; | 1439 | struct rpc_xprt *xprt; |
1440 | struct sock_xprt *transport; | ||
1438 | 1441 | ||
1439 | read_lock_bh(&sk->sk_callback_lock); | 1442 | read_lock_bh(&sk->sk_callback_lock); |
1440 | if (!(xprt = xprt_from_sock(sk))) | 1443 | if (!(xprt = xprt_from_sock(sk))) |
@@ -1446,13 +1449,12 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1446 | sock_flag(sk, SOCK_ZAPPED), | 1449 | sock_flag(sk, SOCK_ZAPPED), |
1447 | sk->sk_shutdown); | 1450 | sk->sk_shutdown); |
1448 | 1451 | ||
1452 | transport = container_of(xprt, struct sock_xprt, xprt); | ||
1449 | trace_rpc_socket_state_change(xprt, sk->sk_socket); | 1453 | trace_rpc_socket_state_change(xprt, sk->sk_socket); |
1450 | switch (sk->sk_state) { | 1454 | switch (sk->sk_state) { |
1451 | case TCP_ESTABLISHED: | 1455 | case TCP_ESTABLISHED: |
1452 | spin_lock(&xprt->transport_lock); | 1456 | spin_lock(&xprt->transport_lock); |
1453 | if (!xprt_test_and_set_connected(xprt)) { | 1457 | if (!xprt_test_and_set_connected(xprt)) { |
1454 | struct sock_xprt *transport = container_of(xprt, | ||
1455 | struct sock_xprt, xprt); | ||
1456 | 1458 | ||
1457 | /* Reset TCP record info */ | 1459 | /* Reset TCP record info */ |
1458 | transport->tcp_offset = 0; | 1460 | transport->tcp_offset = 0; |
@@ -1461,6 +1463,8 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1461 | transport->tcp_flags = | 1463 | transport->tcp_flags = |
1462 | TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; | 1464 | TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; |
1463 | xprt->connect_cookie++; | 1465 | xprt->connect_cookie++; |
1466 | clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); | ||
1467 | xprt_clear_connecting(xprt); | ||
1464 | 1468 | ||
1465 | xprt_wake_pending_tasks(xprt, -EAGAIN); | 1469 | xprt_wake_pending_tasks(xprt, -EAGAIN); |
1466 | } | 1470 | } |
@@ -1496,6 +1500,9 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1496 | smp_mb__after_atomic(); | 1500 | smp_mb__after_atomic(); |
1497 | break; | 1501 | break; |
1498 | case TCP_CLOSE: | 1502 | case TCP_CLOSE: |
1503 | if (test_and_clear_bit(XPRT_SOCK_CONNECTING, | ||
1504 | &transport->sock_state)) | ||
1505 | xprt_clear_connecting(xprt); | ||
1499 | xs_sock_mark_closed(xprt); | 1506 | xs_sock_mark_closed(xprt); |
1500 | } | 1507 | } |
1501 | out: | 1508 | out: |
@@ -2179,6 +2186,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2179 | /* Tell the socket layer to start connecting... */ | 2186 | /* Tell the socket layer to start connecting... */ |
2180 | xprt->stat.connect_count++; | 2187 | xprt->stat.connect_count++; |
2181 | xprt->stat.connect_start = jiffies; | 2188 | xprt->stat.connect_start = jiffies; |
2189 | set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); | ||
2182 | ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); | 2190 | ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); |
2183 | switch (ret) { | 2191 | switch (ret) { |
2184 | case 0: | 2192 | case 0: |
@@ -2240,7 +2248,6 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
2240 | case -EINPROGRESS: | 2248 | case -EINPROGRESS: |
2241 | case -EALREADY: | 2249 | case -EALREADY: |
2242 | xprt_unlock_connect(xprt, transport); | 2250 | xprt_unlock_connect(xprt, transport); |
2243 | xprt_clear_connecting(xprt); | ||
2244 | return; | 2251 | return; |
2245 | case -EINVAL: | 2252 | case -EINVAL: |
2246 | /* Happens, for instance, if the user specified a link | 2253 | /* Happens, for instance, if the user specified a link |
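The xprtsock.c change brackets the TCP connect attempt with a new XPRT_SOCK_CONNECTING bit: it is set just before kernel_connect(), cleared in the state-change callback on TCP_ESTABLISHED, and on TCP_CLOSE xprt_clear_connecting() is called only if the bit was still set, so a stray close cannot clear a connection attempt it does not own. A stripped-down sketch of that flag handshake (names and bit number are invented):

#include <linux/types.h>
#include <linux/bitops.h>

#define DEMO_SOCK_CONNECTING 0  /* invented bit in a private state word */

static unsigned long demo_sock_state;

static void demo_start_connect(void)
{
    set_bit(DEMO_SOCK_CONNECTING, &demo_sock_state);
    /* ... kernel_connect() is issued here ... */
}

static void demo_state_change(bool established)
{
    if (established) {
        /* connect finished: drop the flag, mark the transport ready */
        clear_bit(DEMO_SOCK_CONNECTING, &demo_sock_state);
    } else if (test_and_clear_bit(DEMO_SOCK_CONNECTING, &demo_sock_state)) {
        /* only the owner of an in-flight connect reports the failure */
    }
}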
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 562c926a51cc..c5ac436235e0 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -539,6 +539,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) | |||
539 | *err = -TIPC_ERR_NO_NAME; | 539 | *err = -TIPC_ERR_NO_NAME; |
540 | if (skb_linearize(skb)) | 540 | if (skb_linearize(skb)) |
541 | return false; | 541 | return false; |
542 | msg = buf_msg(skb); | ||
542 | if (msg_reroute_cnt(msg)) | 543 | if (msg_reroute_cnt(msg)) |
543 | return false; | 544 | return false; |
544 | dnode = addr_domain(net, msg_lookup_scope(msg)); | 545 | dnode = addr_domain(net, msg_lookup_scope(msg)); |
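The tipc/msg.c fix re-reads the message header after skb_linearize(): linearizing a nonlinear skb can reallocate its data area, so any pointer taken into it beforehand may be stale. A short sketch of the same rule using a plain IP header (illustrative only, not TIPC code):

#include <linux/skbuff.h>
#include <linux/ip.h>

static bool demo_ttl_ok(struct sk_buff *skb)
{
    struct iphdr *iph;

    if (skb_linearize(skb))
        return false;

    iph = ip_hdr(skb);      /* (re)read only after possible reallocation */
    return iph->ttl > 1;
}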
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 03ee4d359f6a..ef31b40ad550 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -2179,8 +2179,21 @@ unlock: | |||
2179 | if (UNIXCB(skb).fp) | 2179 | if (UNIXCB(skb).fp) |
2180 | scm.fp = scm_fp_dup(UNIXCB(skb).fp); | 2180 | scm.fp = scm_fp_dup(UNIXCB(skb).fp); |
2181 | 2181 | ||
2182 | sk_peek_offset_fwd(sk, chunk); | 2182 | if (skip) { |
2183 | sk_peek_offset_fwd(sk, chunk); | ||
2184 | skip -= chunk; | ||
2185 | } | ||
2183 | 2186 | ||
2187 | if (UNIXCB(skb).fp) | ||
2188 | break; | ||
2189 | |||
2190 | last = skb; | ||
2191 | last_len = skb->len; | ||
2192 | unix_state_lock(sk); | ||
2193 | skb = skb_peek_next(skb, &sk->sk_receive_queue); | ||
2194 | if (skb) | ||
2195 | goto again; | ||
2196 | unix_state_unlock(sk); | ||
2184 | break; | 2197 | break; |
2185 | } | 2198 | } |
2186 | } while (size); | 2199 | } while (size); |
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c index 9119ac6a8270..c285a3b8a9f1 100644 --- a/samples/kprobes/jprobe_example.c +++ b/samples/kprobes/jprobe_example.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * Here's a sample kernel module showing the use of jprobes to dump | 2 | * Here's a sample kernel module showing the use of jprobes to dump |
3 | * the arguments of do_fork(). | 3 | * the arguments of _do_fork(). |
4 | * | 4 | * |
5 | * For more information on theory of operation of jprobes, see | 5 | * For more information on theory of operation of jprobes, see |
6 | * Documentation/kprobes.txt | 6 | * Documentation/kprobes.txt |
7 | * | 7 | * |
8 | * Build and insert the kernel module as done in the kprobe example. | 8 | * Build and insert the kernel module as done in the kprobe example. |
9 | * You will see the trace data in /var/log/messages and on the | 9 | * You will see the trace data in /var/log/messages and on the |
10 | * console whenever do_fork() is invoked to create a new process. | 10 | * console whenever _do_fork() is invoked to create a new process. |
11 | * (Some messages may be suppressed if syslogd is configured to | 11 | * (Some messages may be suppressed if syslogd is configured to |
12 | * eliminate duplicate messages.) | 12 | * eliminate duplicate messages.) |
13 | */ | 13 | */ |
@@ -17,13 +17,13 @@ | |||
17 | #include <linux/kprobes.h> | 17 | #include <linux/kprobes.h> |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * Jumper probe for do_fork. | 20 | * Jumper probe for _do_fork. |
21 | * Mirror principle enables access to arguments of the probed routine | 21 | * Mirror principle enables access to arguments of the probed routine |
22 | * from the probe handler. | 22 | * from the probe handler. |
23 | */ | 23 | */ |
24 | 24 | ||
25 | /* Proxy routine having the same arguments as actual do_fork() routine */ | 25 | /* Proxy routine having the same arguments as actual _do_fork() routine */ |
26 | static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, | 26 | static long j_do_fork(unsigned long clone_flags, unsigned long stack_start, |
27 | unsigned long stack_size, int __user *parent_tidptr, | 27 | unsigned long stack_size, int __user *parent_tidptr, |
28 | int __user *child_tidptr) | 28 | int __user *child_tidptr) |
29 | { | 29 | { |
@@ -36,9 +36,9 @@ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, | |||
36 | } | 36 | } |
37 | 37 | ||
38 | static struct jprobe my_jprobe = { | 38 | static struct jprobe my_jprobe = { |
39 | .entry = jdo_fork, | 39 | .entry = j_do_fork, |
40 | .kp = { | 40 | .kp = { |
41 | .symbol_name = "do_fork", | 41 | .symbol_name = "_do_fork", |
42 | }, | 42 | }, |
43 | }; | 43 | }; |
44 | 44 | ||
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index 366db1a9fb65..727eb21c9c56 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * NOTE: This example works on x86 and powerpc. | 2 | * NOTE: This example works on x86 and powerpc. |
3 | * Here's a sample kernel module showing the use of kprobes to dump a | 3 | * Here's a sample kernel module showing the use of kprobes to dump a |
4 | * stack trace and selected registers when do_fork() is called. | 4 | * stack trace and selected registers when _do_fork() is called. |
5 | * | 5 | * |
6 | * For more information on theory of operation of kprobes, see | 6 | * For more information on theory of operation of kprobes, see |
7 | * Documentation/kprobes.txt | 7 | * Documentation/kprobes.txt |
8 | * | 8 | * |
9 | * You will see the trace data in /var/log/messages and on the console | 9 | * You will see the trace data in /var/log/messages and on the console |
10 | * whenever do_fork() is invoked to create a new process. | 10 | * whenever _do_fork() is invoked to create a new process. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | /* For each probe you need to allocate a kprobe structure */ | 17 | /* For each probe you need to allocate a kprobe structure */ |
18 | static struct kprobe kp = { | 18 | static struct kprobe kp = { |
19 | .symbol_name = "do_fork", | 19 | .symbol_name = "_do_fork", |
20 | }; | 20 | }; |
21 | 21 | ||
22 | /* kprobe pre_handler: called just before the probed instruction is executed */ | 22 | /* kprobe pre_handler: called just before the probed instruction is executed */ |
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c index 1041b6731598..ebb1d1aed547 100644 --- a/samples/kprobes/kretprobe_example.c +++ b/samples/kprobes/kretprobe_example.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * usage: insmod kretprobe_example.ko func=<func_name> | 8 | * usage: insmod kretprobe_example.ko func=<func_name> |
9 | * | 9 | * |
10 | * If no func_name is specified, do_fork is instrumented | 10 | * If no func_name is specified, _do_fork is instrumented |
11 | * | 11 | * |
12 | * For more information on theory of operation of kretprobes, see | 12 | * For more information on theory of operation of kretprobes, see |
13 | * Documentation/kprobes.txt | 13 | * Documentation/kprobes.txt |
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/limits.h> | 25 | #include <linux/limits.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | 27 | ||
28 | static char func_name[NAME_MAX] = "do_fork"; | 28 | static char func_name[NAME_MAX] = "_do_fork"; |
29 | module_param_string(func, func_name, NAME_MAX, S_IRUGO); | 29 | module_param_string(func, func_name, NAME_MAX, S_IRUGO); |
30 | MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" | 30 | MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" |
31 | " function's execution time"); | 31 | " function's execution time"); |
diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c index 10d23ca9f617..b071bf476fea 100644 --- a/scripts/extract-cert.c +++ b/scripts/extract-cert.c | |||
@@ -1,15 +1,15 @@ | |||
1 | /* Extract X.509 certificate in DER form from PKCS#11 or PEM. | 1 | /* Extract X.509 certificate in DER form from PKCS#11 or PEM. |
2 | * | 2 | * |
3 | * Copyright © 2014 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright © 2014-2015 Red Hat, Inc. All Rights Reserved. |
4 | * Copyright © 2015 Intel Corporation. | 4 | * Copyright © 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * Authors: David Howells <dhowells@redhat.com> | 6 | * Authors: David Howells <dhowells@redhat.com> |
7 | * David Woodhouse <dwmw2@infradead.org> | 7 | * David Woodhouse <dwmw2@infradead.org> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public Licence | 10 | * modify it under the terms of the GNU Lesser General Public License |
11 | * as published by the Free Software Foundation; either version | 11 | * as published by the Free Software Foundation; either version 2.1 |
12 | * 2 of the Licence, or (at your option) any later version. | 12 | * of the licence, or (at your option) any later version. |
13 | */ | 13 | */ |
14 | #define _GNU_SOURCE | 14 | #define _GNU_SOURCE |
15 | #include <stdio.h> | 15 | #include <stdio.h> |
@@ -17,13 +17,9 @@ | |||
17 | #include <stdint.h> | 17 | #include <stdint.h> |
18 | #include <stdbool.h> | 18 | #include <stdbool.h> |
19 | #include <string.h> | 19 | #include <string.h> |
20 | #include <getopt.h> | ||
21 | #include <err.h> | 20 | #include <err.h> |
22 | #include <arpa/inet.h> | ||
23 | #include <openssl/bio.h> | 21 | #include <openssl/bio.h> |
24 | #include <openssl/evp.h> | ||
25 | #include <openssl/pem.h> | 22 | #include <openssl/pem.h> |
26 | #include <openssl/pkcs7.h> | ||
27 | #include <openssl/err.h> | 23 | #include <openssl/err.h> |
28 | #include <openssl/engine.h> | 24 | #include <openssl/engine.h> |
29 | 25 | ||
diff --git a/scripts/package/builddeb b/scripts/package/builddeb index 0cd46e129920..b967e4f9fed2 100755 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb | |||
@@ -115,7 +115,7 @@ esac | |||
115 | BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)" | 115 | BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)" |
116 | 116 | ||
117 | # Setup the directory structure | 117 | # Setup the directory structure |
118 | rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" | 118 | rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files |
119 | mkdir -m 755 -p "$tmpdir/DEBIAN" | 119 | mkdir -m 755 -p "$tmpdir/DEBIAN" |
120 | mkdir -p "$tmpdir/lib" "$tmpdir/boot" | 120 | mkdir -p "$tmpdir/lib" "$tmpdir/boot" |
121 | mkdir -p "$fwdir/lib/firmware/$version/" | 121 | mkdir -p "$fwdir/lib/firmware/$version/" |
@@ -408,7 +408,7 @@ binary-arch: | |||
408 | \$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg | 408 | \$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg |
409 | 409 | ||
410 | clean: | 410 | clean: |
411 | rm -rf debian/*tmp | 411 | rm -rf debian/*tmp debian/files |
412 | mv debian/ debian.backup # debian/ might be cleaned away | 412 | mv debian/ debian.backup # debian/ might be cleaned away |
413 | \$(MAKE) clean | 413 | \$(MAKE) clean |
414 | mv debian.backup debian | 414 | mv debian.backup debian |
diff --git a/scripts/sign-file.c b/scripts/sign-file.c index 058bba3103e2..250a7a645033 100755 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c | |||
@@ -1,12 +1,15 @@ | |||
1 | /* Sign a module file using the given key. | 1 | /* Sign a module file using the given key. |
2 | * | 2 | * |
3 | * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright © 2014-2015 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Copyright © 2015 Intel Corporation. |
5 | * | ||
6 | * Authors: David Howells <dhowells@redhat.com> | ||
7 | * David Woodhouse <dwmw2@infradead.org> | ||
5 | * | 8 | * |
6 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public Licence | 10 | * modify it under the terms of the GNU Lesser General Public License |
8 | * as published by the Free Software Foundation; either version | 11 | * as published by the Free Software Foundation; either version 2.1 |
9 | * 2 of the Licence, or (at your option) any later version. | 12 | * of the licence, or (at your option) any later version. |
10 | */ | 13 | */ |
11 | #define _GNU_SOURCE | 14 | #define _GNU_SOURCE |
12 | #include <stdio.h> | 15 | #include <stdio.h> |
@@ -17,13 +20,34 @@ | |||
17 | #include <getopt.h> | 20 | #include <getopt.h> |
18 | #include <err.h> | 21 | #include <err.h> |
19 | #include <arpa/inet.h> | 22 | #include <arpa/inet.h> |
23 | #include <openssl/opensslv.h> | ||
20 | #include <openssl/bio.h> | 24 | #include <openssl/bio.h> |
21 | #include <openssl/evp.h> | 25 | #include <openssl/evp.h> |
22 | #include <openssl/pem.h> | 26 | #include <openssl/pem.h> |
23 | #include <openssl/cms.h> | ||
24 | #include <openssl/err.h> | 27 | #include <openssl/err.h> |
25 | #include <openssl/engine.h> | 28 | #include <openssl/engine.h> |
26 | 29 | ||
30 | /* | ||
31 | * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to | ||
32 | * assume that it's not available and its header file is missing and that we | ||
33 | * should use PKCS#7 instead. Switching to the older PKCS#7 format restricts | ||
34 | * the options we have on specifying the X.509 certificate we want. | ||
35 | * | ||
36 | * Further, older versions of OpenSSL don't support manually adding signers to | ||
37 | * the PKCS#7 message so have to accept that we get a certificate included in | ||
38 | * the signature message. Nor do such older versions of OpenSSL support | ||
39 | * signing with anything other than SHA1 - so we're stuck with that if such is | ||
40 | * the case. | ||
41 | */ | ||
42 | #if OPENSSL_VERSION_NUMBER < 0x10000000L | ||
43 | #define USE_PKCS7 | ||
44 | #endif | ||
45 | #ifndef USE_PKCS7 | ||
46 | #include <openssl/cms.h> | ||
47 | #else | ||
48 | #include <openssl/pkcs7.h> | ||
49 | #endif | ||
50 | |||
27 | struct module_signature { | 51 | struct module_signature { |
28 | uint8_t algo; /* Public-key crypto algorithm [0] */ | 52 | uint8_t algo; /* Public-key crypto algorithm [0] */ |
29 | uint8_t hash; /* Digest algorithm [0] */ | 53 | uint8_t hash; /* Digest algorithm [0] */ |
@@ -107,30 +131,42 @@ int main(int argc, char **argv) | |||
107 | struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; | 131 | struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; |
108 | char *hash_algo = NULL; | 132 | char *hash_algo = NULL; |
109 | char *private_key_name, *x509_name, *module_name, *dest_name; | 133 | char *private_key_name, *x509_name, *module_name, *dest_name; |
110 | bool save_cms = false, replace_orig; | 134 | bool save_sig = false, replace_orig; |
111 | bool sign_only = false; | 135 | bool sign_only = false; |
112 | unsigned char buf[4096]; | 136 | unsigned char buf[4096]; |
113 | unsigned long module_size, cms_size; | 137 | unsigned long module_size, sig_size; |
114 | unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR; | 138 | unsigned int use_signed_attrs; |
115 | const EVP_MD *digest_algo; | 139 | const EVP_MD *digest_algo; |
116 | EVP_PKEY *private_key; | 140 | EVP_PKEY *private_key; |
141 | #ifndef USE_PKCS7 | ||
117 | CMS_ContentInfo *cms; | 142 | CMS_ContentInfo *cms; |
143 | unsigned int use_keyid = 0; | ||
144 | #else | ||
145 | PKCS7 *pkcs7; | ||
146 | #endif | ||
118 | X509 *x509; | 147 | X509 *x509; |
119 | BIO *b, *bd = NULL, *bm; | 148 | BIO *b, *bd = NULL, *bm; |
120 | int opt, n; | 149 | int opt, n; |
121 | |||
122 | OpenSSL_add_all_algorithms(); | 150 | OpenSSL_add_all_algorithms(); |
123 | ERR_load_crypto_strings(); | 151 | ERR_load_crypto_strings(); |
124 | ERR_clear_error(); | 152 | ERR_clear_error(); |
125 | 153 | ||
126 | key_pass = getenv("KBUILD_SIGN_PIN"); | 154 | key_pass = getenv("KBUILD_SIGN_PIN"); |
127 | 155 | ||
156 | #ifndef USE_PKCS7 | ||
157 | use_signed_attrs = CMS_NOATTR; | ||
158 | #else | ||
159 | use_signed_attrs = PKCS7_NOATTR; | ||
160 | #endif | ||
161 | |||
128 | do { | 162 | do { |
129 | opt = getopt(argc, argv, "dpk"); | 163 | opt = getopt(argc, argv, "dpk"); |
130 | switch (opt) { | 164 | switch (opt) { |
131 | case 'p': save_cms = true; break; | 165 | case 'p': save_sig = true; break; |
132 | case 'd': sign_only = true; save_cms = true; break; | 166 | case 'd': sign_only = true; save_sig = true; break; |
167 | #ifndef USE_PKCS7 | ||
133 | case 'k': use_keyid = CMS_USE_KEYID; break; | 168 | case 'k': use_keyid = CMS_USE_KEYID; break; |
169 | #endif | ||
134 | case -1: break; | 170 | case -1: break; |
135 | default: format(); | 171 | default: format(); |
136 | } | 172 | } |
@@ -154,6 +190,14 @@ int main(int argc, char **argv) | |||
154 | replace_orig = true; | 190 | replace_orig = true; |
155 | } | 191 | } |
156 | 192 | ||
193 | #ifdef USE_PKCS7 | ||
194 | if (strcmp(hash_algo, "sha1") != 0) { | ||
195 | fprintf(stderr, "sign-file: %s only supports SHA1 signing\n", | ||
196 | OPENSSL_VERSION_TEXT); | ||
197 | exit(3); | ||
198 | } | ||
199 | #endif | ||
200 | |||
157 | /* Read the private key and the X.509 cert the PKCS#7 message | 201 | /* Read the private key and the X.509 cert the PKCS#7 message |
158 | * will point to. | 202 | * will point to. |
159 | */ | 203 | */ |
@@ -210,7 +254,8 @@ int main(int argc, char **argv) | |||
210 | bm = BIO_new_file(module_name, "rb"); | 254 | bm = BIO_new_file(module_name, "rb"); |
211 | ERR(!bm, "%s", module_name); | 255 | ERR(!bm, "%s", module_name); |
212 | 256 | ||
213 | /* Load the CMS message from the digest buffer. */ | 257 | #ifndef USE_PKCS7 |
258 | /* Load the signature message from the digest buffer. */ | ||
214 | cms = CMS_sign(NULL, NULL, NULL, NULL, | 259 | cms = CMS_sign(NULL, NULL, NULL, NULL, |
215 | CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM); | 260 | CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM); |
216 | ERR(!cms, "CMS_sign"); | 261 | ERR(!cms, "CMS_sign"); |
@@ -218,17 +263,31 @@ int main(int argc, char **argv) | |||
218 | ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, | 263 | ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, |
219 | CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP | | 264 | CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP | |
220 | use_keyid | use_signed_attrs), | 265 | use_keyid | use_signed_attrs), |
221 | "CMS_sign_add_signer"); | 266 | "CMS_add1_signer"); |
222 | ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0, | 267 | ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0, |
223 | "CMS_final"); | 268 | "CMS_final"); |
224 | 269 | ||
225 | if (save_cms) { | 270 | #else |
226 | char *cms_name; | 271 | pkcs7 = PKCS7_sign(x509, private_key, NULL, bm, |
272 | PKCS7_NOCERTS | PKCS7_BINARY | | ||
273 | PKCS7_DETACHED | use_signed_attrs); | ||
274 | ERR(!pkcs7, "PKCS7_sign"); | ||
275 | #endif | ||
276 | |||
277 | if (save_sig) { | ||
278 | char *sig_file_name; | ||
227 | 279 | ||
228 | ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf"); | 280 | ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0, |
229 | b = BIO_new_file(cms_name, "wb"); | 281 | "asprintf"); |
230 | ERR(!b, "%s", cms_name); | 282 | b = BIO_new_file(sig_file_name, "wb"); |
231 | ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name); | 283 | ERR(!b, "%s", sig_file_name); |
284 | #ifndef USE_PKCS7 | ||
285 | ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, | ||
286 | "%s", sig_file_name); | ||
287 | #else | ||
288 | ERR(i2d_PKCS7_bio(b, pkcs7) < 0, | ||
289 | "%s", sig_file_name); | ||
290 | #endif | ||
232 | BIO_free(b); | 291 | BIO_free(b); |
233 | } | 292 | } |
234 | 293 | ||
@@ -244,9 +303,13 @@ int main(int argc, char **argv) | |||
244 | ERR(n < 0, "%s", module_name); | 303 | ERR(n < 0, "%s", module_name); |
245 | module_size = BIO_number_written(bd); | 304 | module_size = BIO_number_written(bd); |
246 | 305 | ||
306 | #ifndef USE_PKCS7 | ||
247 | ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name); | 307 | ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name); |
248 | cms_size = BIO_number_written(bd) - module_size; | 308 | #else |
249 | sig_info.sig_len = htonl(cms_size); | 309 | ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name); |
310 | #endif | ||
311 | sig_size = BIO_number_written(bd) - module_size; | ||
312 | sig_info.sig_len = htonl(sig_size); | ||
250 | ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name); | 313 | ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name); |
251 | ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name); | 314 | ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name); |
252 | 315 | ||
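sign-file.c now keys its behaviour off OPENSSL_VERSION_NUMBER: before 1.0.0 it falls back to PKCS#7 (and SHA1-only signing), otherwise it uses CMS. A minimal standalone check of the same macro, assuming the OpenSSL headers are installed:

#include <stdio.h>
#include <openssl/opensslv.h>

int main(void)
{
#if OPENSSL_VERSION_NUMBER < 0x10000000L
    puts("pre-1.0.0 OpenSSL: PKCS#7 signing path, SHA1 only");
#else
    puts("OpenSSL 1.0.0 or newer: CMS signing path");
#endif
    printf("built against %s\n", OPENSSL_VERSION_TEXT);
    return 0;
}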
diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 73455089feef..03c1652c9a1f 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c | |||
@@ -401,7 +401,7 @@ static bool verify_new_ex(struct dev_cgroup *dev_cgroup, | |||
401 | bool match = false; | 401 | bool match = false; |
402 | 402 | ||
403 | RCU_LOCKDEP_WARN(!rcu_read_lock_held() && | 403 | RCU_LOCKDEP_WARN(!rcu_read_lock_held() && |
404 | lockdep_is_held(&devcgroup_mutex), | 404 | !lockdep_is_held(&devcgroup_mutex), |
405 | "device_cgroup:verify_new_ex called without proper synchronization"); | 405 | "device_cgroup:verify_new_ex called without proper synchronization"); |
406 | 406 | ||
407 | if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) { | 407 | if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) { |
diff --git a/security/keys/gc.c b/security/keys/gc.c index c7952375ac53..39eac1fd5706 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c | |||
@@ -134,6 +134,10 @@ static noinline void key_gc_unused_keys(struct list_head *keys) | |||
134 | kdebug("- %u", key->serial); | 134 | kdebug("- %u", key->serial); |
135 | key_check(key); | 135 | key_check(key); |
136 | 136 | ||
137 | /* Throw away the key data */ | ||
138 | if (key->type->destroy) | ||
139 | key->type->destroy(key); | ||
140 | |||
137 | security_key_free(key); | 141 | security_key_free(key); |
138 | 142 | ||
139 | /* deal with the user's key tracking and quota */ | 143 | /* deal with the user's key tracking and quota */ |
@@ -148,10 +152,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys) | |||
148 | if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) | 152 | if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) |
149 | atomic_dec(&key->user->nikeys); | 153 | atomic_dec(&key->user->nikeys); |
150 | 154 | ||
151 | /* now throw away the key memory */ | ||
152 | if (key->type->destroy) | ||
153 | key->type->destroy(key); | ||
154 | |||
155 | key_user_put(key->user); | 155 | key_user_put(key->user); |
156 | 156 | ||
157 | kfree(key->description); | 157 | kfree(key->description); |
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig index 885683a3b0bd..e0406211716b 100644 --- a/sound/arm/Kconfig +++ b/sound/arm/Kconfig | |||
@@ -9,6 +9,14 @@ menuconfig SND_ARM | |||
9 | Drivers that are implemented on ASoC can be found in | 9 | Drivers that are implemented on ASoC can be found in |
10 | "ALSA for SoC audio support" section. | 10 | "ALSA for SoC audio support" section. |
11 | 11 | ||
12 | config SND_PXA2XX_LIB | ||
13 | tristate | ||
14 | select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97 | ||
15 | select SND_DMAENGINE_PCM | ||
16 | |||
17 | config SND_PXA2XX_LIB_AC97 | ||
18 | bool | ||
19 | |||
12 | if SND_ARM | 20 | if SND_ARM |
13 | 21 | ||
14 | config SND_ARMAACI | 22 | config SND_ARMAACI |
@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM | |||
21 | tristate | 29 | tristate |
22 | select SND_PCM | 30 | select SND_PCM |
23 | 31 | ||
24 | config SND_PXA2XX_LIB | ||
25 | tristate | ||
26 | select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97 | ||
27 | |||
28 | config SND_PXA2XX_LIB_AC97 | ||
29 | bool | ||
30 | |||
31 | config SND_PXA2XX_AC97 | 32 | config SND_PXA2XX_AC97 |
32 | tristate "AC97 driver for the Intel PXA2xx chip" | 33 | tristate "AC97 driver for the Intel PXA2xx chip" |
33 | depends on ARCH_PXA | 34 | depends on ARCH_PXA |
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index 477742cb70a2..58c0aad37284 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c | |||
@@ -73,6 +73,7 @@ struct hda_tegra { | |||
73 | struct clk *hda2codec_2x_clk; | 73 | struct clk *hda2codec_2x_clk; |
74 | struct clk *hda2hdmi_clk; | 74 | struct clk *hda2hdmi_clk; |
75 | void __iomem *regs; | 75 | void __iomem *regs; |
76 | struct work_struct probe_work; | ||
76 | }; | 77 | }; |
77 | 78 | ||
78 | #ifdef CONFIG_PM | 79 | #ifdef CONFIG_PM |
@@ -294,7 +295,9 @@ static int hda_tegra_dev_disconnect(struct snd_device *device) | |||
294 | static int hda_tegra_dev_free(struct snd_device *device) | 295 | static int hda_tegra_dev_free(struct snd_device *device) |
295 | { | 296 | { |
296 | struct azx *chip = device->device_data; | 297 | struct azx *chip = device->device_data; |
298 | struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip); | ||
297 | 299 | ||
300 | cancel_work_sync(&hda->probe_work); | ||
298 | if (azx_bus(chip)->chip_init) { | 301 | if (azx_bus(chip)->chip_init) { |
299 | azx_stop_all_streams(chip); | 302 | azx_stop_all_streams(chip); |
300 | azx_stop_chip(chip); | 303 | azx_stop_chip(chip); |
@@ -426,6 +429,9 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev) | |||
426 | /* | 429 | /* |
427 | * constructor | 430 | * constructor |
428 | */ | 431 | */ |
432 | |||
433 | static void hda_tegra_probe_work(struct work_struct *work); | ||
434 | |||
429 | static int hda_tegra_create(struct snd_card *card, | 435 | static int hda_tegra_create(struct snd_card *card, |
430 | unsigned int driver_caps, | 436 | unsigned int driver_caps, |
431 | struct hda_tegra *hda) | 437 | struct hda_tegra *hda) |
@@ -452,6 +458,8 @@ static int hda_tegra_create(struct snd_card *card, | |||
452 | chip->single_cmd = false; | 458 | chip->single_cmd = false; |
453 | chip->snoop = true; | 459 | chip->snoop = true; |
454 | 460 | ||
461 | INIT_WORK(&hda->probe_work, hda_tegra_probe_work); | ||
462 | |||
455 | err = azx_bus_init(chip, NULL, &hda_tegra_io_ops); | 463 | err = azx_bus_init(chip, NULL, &hda_tegra_io_ops); |
456 | if (err < 0) | 464 | if (err < 0) |
457 | return err; | 465 | return err; |
@@ -499,6 +507,21 @@ static int hda_tegra_probe(struct platform_device *pdev) | |||
499 | card->private_data = chip; | 507 | card->private_data = chip; |
500 | 508 | ||
501 | dev_set_drvdata(&pdev->dev, card); | 509 | dev_set_drvdata(&pdev->dev, card); |
510 | schedule_work(&hda->probe_work); | ||
511 | |||
512 | return 0; | ||
513 | |||
514 | out_free: | ||
515 | snd_card_free(card); | ||
516 | return err; | ||
517 | } | ||
518 | |||
519 | static void hda_tegra_probe_work(struct work_struct *work) | ||
520 | { | ||
521 | struct hda_tegra *hda = container_of(work, struct hda_tegra, probe_work); | ||
522 | struct azx *chip = &hda->chip; | ||
523 | struct platform_device *pdev = to_platform_device(hda->dev); | ||
524 | int err; | ||
502 | 525 | ||
503 | err = hda_tegra_first_init(chip, pdev); | 526 | err = hda_tegra_first_init(chip, pdev); |
504 | if (err < 0) | 527 | if (err < 0) |
@@ -520,11 +543,8 @@ static int hda_tegra_probe(struct platform_device *pdev) | |||
520 | chip->running = 1; | 543 | chip->running = 1; |
521 | snd_hda_set_power_save(&chip->bus, power_save * 1000); | 544 | snd_hda_set_power_save(&chip->bus, power_save * 1000); |
522 | 545 | ||
523 | return 0; | 546 | out_free: |
524 | 547 | return; /* no error return from async probe */ | |
525 | out_free: | ||
526 | snd_card_free(card); | ||
527 | return err; | ||
528 | } | 548 | } |
529 | 549 | ||
530 | static int hda_tegra_remove(struct platform_device *pdev) | 550 | static int hda_tegra_remove(struct platform_device *pdev) |
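hda_tegra.c moves the slow part of probing into a work item: probe() schedules it and returns immediately, and dev_free() cancels it synchronously before anything is torn down. A bare-bones sketch of that deferred-probe pattern (demo_* names are invented):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_dev {
    struct work_struct probe_work;
    /* ... clocks, registers, card pointer ... */
};

static void demo_probe_work(struct work_struct *work)
{
    struct demo_dev *dev = container_of(work, struct demo_dev, probe_work);

    /* the slow hardware bring-up runs here, off the probe path */
    (void)dev;
}

static int demo_probe(struct demo_dev *dev)
{
    INIT_WORK(&dev->probe_work, demo_probe_work);
    schedule_work(&dev->probe_work);    /* probe() itself returns quickly */
    return 0;
}

static void demo_remove(struct demo_dev *dev)
{
    cancel_work_sync(&dev->probe_work); /* never free state under a live worker */
}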
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 584a0343ab0c..85813de26da8 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -633,6 +633,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = { | |||
633 | SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11), | 633 | SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11), |
634 | SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6), | 634 | SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6), |
635 | SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6), | 635 | SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6), |
636 | SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11), | ||
636 | {} /* terminator */ | 637 | {} /* terminator */ |
637 | }; | 638 | }; |
638 | 639 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a75b5611d1e4..16b8dcba5c12 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -4188,6 +4188,24 @@ static void alc_fixup_disable_aamix(struct hda_codec *codec, | |||
4188 | } | 4188 | } |
4189 | } | 4189 | } |
4190 | 4190 | ||
4191 | /* fixup for Thinkpad docks: add dock pins, avoid HP parser fixup */ | ||
4192 | static void alc_fixup_tpt440_dock(struct hda_codec *codec, | ||
4193 | const struct hda_fixup *fix, int action) | ||
4194 | { | ||
4195 | static const struct hda_pintbl pincfgs[] = { | ||
4196 | { 0x16, 0x21211010 }, /* dock headphone */ | ||
4197 | { 0x19, 0x21a11010 }, /* dock mic */ | ||
4198 | { } | ||
4199 | }; | ||
4200 | struct alc_spec *spec = codec->spec; | ||
4201 | |||
4202 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { | ||
4203 | spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; | ||
4204 | codec->power_save_node = 0; /* avoid click noises */ | ||
4205 | snd_hda_apply_pincfgs(codec, pincfgs); | ||
4206 | } | ||
4207 | } | ||
4208 | |||
4191 | static void alc_shutup_dell_xps13(struct hda_codec *codec) | 4209 | static void alc_shutup_dell_xps13(struct hda_codec *codec) |
4192 | { | 4210 | { |
4193 | struct alc_spec *spec = codec->spec; | 4211 | struct alc_spec *spec = codec->spec; |
@@ -4562,7 +4580,6 @@ enum { | |||
4562 | ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC, | 4580 | ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC, |
4563 | ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, | 4581 | ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, |
4564 | ALC292_FIXUP_TPT440_DOCK, | 4582 | ALC292_FIXUP_TPT440_DOCK, |
4565 | ALC292_FIXUP_TPT440_DOCK2, | ||
4566 | ALC283_FIXUP_BXBT2807_MIC, | 4583 | ALC283_FIXUP_BXBT2807_MIC, |
4567 | ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, | 4584 | ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, |
4568 | ALC282_FIXUP_ASPIRE_V5_PINS, | 4585 | ALC282_FIXUP_ASPIRE_V5_PINS, |
@@ -5029,17 +5046,7 @@ static const struct hda_fixup alc269_fixups[] = { | |||
5029 | }, | 5046 | }, |
5030 | [ALC292_FIXUP_TPT440_DOCK] = { | 5047 | [ALC292_FIXUP_TPT440_DOCK] = { |
5031 | .type = HDA_FIXUP_FUNC, | 5048 | .type = HDA_FIXUP_FUNC, |
5032 | .v.func = alc269_fixup_pincfg_no_hp_to_lineout, | 5049 | .v.func = alc_fixup_tpt440_dock, |
5033 | .chained = true, | ||
5034 | .chain_id = ALC292_FIXUP_TPT440_DOCK2 | ||
5035 | }, | ||
5036 | [ALC292_FIXUP_TPT440_DOCK2] = { | ||
5037 | .type = HDA_FIXUP_PINS, | ||
5038 | .v.pins = (const struct hda_pintbl[]) { | ||
5039 | { 0x16, 0x21211010 }, /* dock headphone */ | ||
5040 | { 0x19, 0x21a11010 }, /* dock mic */ | ||
5041 | { } | ||
5042 | }, | ||
5043 | .chained = true, | 5050 | .chained = true, |
5044 | .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST | 5051 | .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST |
5045 | }, | 5052 | }, |
@@ -5299,6 +5306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5299 | SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), | 5306 | SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), |
5300 | SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), | 5307 | SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), |
5301 | SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5308 | SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
5309 | SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), | ||
5302 | SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), | 5310 | SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), |
5303 | SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), | 5311 | SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), |
5304 | SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), | 5312 | SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 9d947aef2c8b..def5cc8dff02 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -4520,7 +4520,11 @@ static int patch_stac92hd73xx(struct hda_codec *codec) | |||
4520 | return err; | 4520 | return err; |
4521 | 4521 | ||
4522 | spec = codec->spec; | 4522 | spec = codec->spec; |
4523 | codec->power_save_node = 1; | 4523 | /* enable power_save_node only for new 92HD89xx chips, as it causes |
4524 | * click noises on old 92HD73xx chips. | ||
4525 | */ | ||
4526 | if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670) | ||
4527 | codec->power_save_node = 1; | ||
4524 | spec->linear_tone_beep = 0; | 4528 | spec->linear_tone_beep = 0; |
4525 | spec->gen.mixer_nid = 0x1d; | 4529 | spec->gen.mixer_nid = 0x1d; |
4526 | spec->have_spdif_mux = 1; | 4530 | spec->have_spdif_mux = 1; |
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c index 58c3164802b8..8c907ebea189 100644 --- a/sound/soc/au1x/db1200.c +++ b/sound/soc/au1x/db1200.c | |||
@@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2s_dai = { | |||
129 | .cpu_dai_name = "au1xpsc_i2s.2", | 129 | .cpu_dai_name = "au1xpsc_i2s.2", |
130 | .platform_name = "au1xpsc-pcm.2", | 130 | .platform_name = "au1xpsc-pcm.2", |
131 | .codec_name = "wm8731.0-001b", | 131 | .codec_name = "wm8731.0-001b", |
132 | .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF | | ||
133 | SND_SOC_DAIFMT_CBM_CFM, | ||
132 | .ops = &db1200_i2s_wm8731_ops, | 134 | .ops = &db1200_i2s_wm8731_ops, |
133 | }; | 135 | }; |
134 | 136 | ||
@@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2s_dai = { | |||
146 | .cpu_dai_name = "au1xpsc_i2s.3", | 148 | .cpu_dai_name = "au1xpsc_i2s.3", |
147 | .platform_name = "au1xpsc-pcm.3", | 149 | .platform_name = "au1xpsc-pcm.3", |
148 | .codec_name = "wm8731.0-001b", | 150 | .codec_name = "wm8731.0-001b", |
151 | .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF | | ||
152 | SND_SOC_DAIFMT_CBM_CFM, | ||
149 | .ops = &db1200_i2s_wm8731_ops, | 153 | .ops = &db1200_i2s_wm8731_ops, |
150 | }; | 154 | }; |
151 | 155 | ||
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c index 38e853add96e..0bf9d62b91a0 100644 --- a/sound/soc/au1x/psc-i2s.c +++ b/sound/soc/au1x/psc-i2s.c | |||
@@ -296,7 +296,6 @@ static int au1xpsc_i2s_drvprobe(struct platform_device *pdev) | |||
296 | { | 296 | { |
297 | struct resource *iores, *dmares; | 297 | struct resource *iores, *dmares; |
298 | unsigned long sel; | 298 | unsigned long sel; |
299 | int ret; | ||
300 | struct au1xpsc_audio_data *wd; | 299 | struct au1xpsc_audio_data *wd; |
301 | 300 | ||
302 | wd = devm_kzalloc(&pdev->dev, sizeof(struct au1xpsc_audio_data), | 301 | wd = devm_kzalloc(&pdev->dev, sizeof(struct au1xpsc_audio_data), |
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c index 3c2f0f8d6266..f823eb502367 100644 --- a/sound/soc/codecs/rt298.c +++ b/sound/soc/codecs/rt298.c | |||
@@ -50,24 +50,24 @@ struct rt298_priv { | |||
50 | }; | 50 | }; |
51 | 51 | ||
52 | static struct reg_default rt298_index_def[] = { | 52 | static struct reg_default rt298_index_def[] = { |
53 | { 0x01, 0xaaaa }, | 53 | { 0x01, 0xa5a8 }, |
54 | { 0x02, 0x8aaa }, | 54 | { 0x02, 0x8e95 }, |
55 | { 0x03, 0x0002 }, | 55 | { 0x03, 0x0002 }, |
56 | { 0x04, 0xaf01 }, | 56 | { 0x04, 0xaf67 }, |
57 | { 0x08, 0x000d }, | 57 | { 0x08, 0x200f }, |
58 | { 0x09, 0xd810 }, | 58 | { 0x09, 0xd010 }, |
59 | { 0x0a, 0x0120 }, | 59 | { 0x0a, 0x0100 }, |
60 | { 0x0b, 0x0000 }, | 60 | { 0x0b, 0x0000 }, |
61 | { 0x0d, 0x2800 }, | 61 | { 0x0d, 0x2800 }, |
62 | { 0x0f, 0x0000 }, | 62 | { 0x0f, 0x0022 }, |
63 | { 0x19, 0x0a17 }, | 63 | { 0x19, 0x0217 }, |
64 | { 0x20, 0x0020 }, | 64 | { 0x20, 0x0020 }, |
65 | { 0x33, 0x0208 }, | 65 | { 0x33, 0x0208 }, |
66 | { 0x46, 0x0300 }, | 66 | { 0x46, 0x0300 }, |
67 | { 0x49, 0x0004 }, | 67 | { 0x49, 0x4004 }, |
68 | { 0x4f, 0x50e9 }, | 68 | { 0x4f, 0x50c9 }, |
69 | { 0x50, 0x2000 }, | 69 | { 0x50, 0x3000 }, |
70 | { 0x63, 0x2902 }, | 70 | { 0x63, 0x1b02 }, |
71 | { 0x67, 0x1111 }, | 71 | { 0x67, 0x1111 }, |
72 | { 0x68, 0x1016 }, | 72 | { 0x68, 0x1016 }, |
73 | { 0x69, 0x273f }, | 73 | { 0x69, 0x273f }, |
@@ -1214,7 +1214,7 @@ static int rt298_i2c_probe(struct i2c_client *i2c, | |||
1214 | mdelay(10); | 1214 | mdelay(10); |
1215 | 1215 | ||
1216 | if (!rt298->pdata.gpio2_en) | 1216 | if (!rt298->pdata.gpio2_en) |
1217 | regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x4000); | 1217 | regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x40); |
1218 | else | 1218 | else |
1219 | regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0); | 1219 | regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0); |
1220 | 1220 | ||
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 4972bf3efa91..5c101af0ac63 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c | |||
@@ -519,11 +519,11 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = { | |||
519 | RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv), | 519 | RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv), |
520 | 520 | ||
521 | /* ADC Boost Volume Control */ | 521 | /* ADC Boost Volume Control */ |
522 | SOC_DOUBLE_TLV("STO1 ADC Boost Gain", RT5645_ADC_BST_VOL1, | 522 | SOC_DOUBLE_TLV("ADC Boost Capture Volume", RT5645_ADC_BST_VOL1, |
523 | RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0, | 523 | RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0, |
524 | adc_bst_tlv), | 524 | adc_bst_tlv), |
525 | SOC_DOUBLE_TLV("STO2 ADC Boost Gain", RT5645_ADC_BST_VOL1, | 525 | SOC_DOUBLE_TLV("Mono ADC Boost Capture Volume", RT5645_ADC_BST_VOL2, |
526 | RT5645_STO2_ADC_L_BST_SFT, RT5645_STO2_ADC_R_BST_SFT, 3, 0, | 526 | RT5645_MONO_ADC_L_BST_SFT, RT5645_MONO_ADC_R_BST_SFT, 3, 0, |
527 | adc_bst_tlv), | 527 | adc_bst_tlv), |
528 | 528 | ||
529 | /* I2S2 function select */ | 529 | /* I2S2 function select */ |
@@ -732,14 +732,14 @@ static const struct snd_kcontrol_new rt5645_mono_adc_r_mix[] = { | |||
732 | static const struct snd_kcontrol_new rt5645_dac_l_mix[] = { | 732 | static const struct snd_kcontrol_new rt5645_dac_l_mix[] = { |
733 | SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER, | 733 | SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER, |
734 | RT5645_M_ADCMIX_L_SFT, 1, 1), | 734 | RT5645_M_ADCMIX_L_SFT, 1, 1), |
735 | SOC_DAPM_SINGLE("DAC1 Switch", RT5645_AD_DA_MIXER, | 735 | SOC_DAPM_SINGLE_AUTODISABLE("DAC1 Switch", RT5645_AD_DA_MIXER, |
736 | RT5645_M_DAC1_L_SFT, 1, 1), | 736 | RT5645_M_DAC1_L_SFT, 1, 1), |
737 | }; | 737 | }; |
738 | 738 | ||
739 | static const struct snd_kcontrol_new rt5645_dac_r_mix[] = { | 739 | static const struct snd_kcontrol_new rt5645_dac_r_mix[] = { |
740 | SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER, | 740 | SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER, |
741 | RT5645_M_ADCMIX_R_SFT, 1, 1), | 741 | RT5645_M_ADCMIX_R_SFT, 1, 1), |
742 | SOC_DAPM_SINGLE("DAC1 Switch", RT5645_AD_DA_MIXER, | 742 | SOC_DAPM_SINGLE_AUTODISABLE("DAC1 Switch", RT5645_AD_DA_MIXER, |
743 | RT5645_M_DAC1_R_SFT, 1, 1), | 743 | RT5645_M_DAC1_R_SFT, 1, 1), |
744 | }; | 744 | }; |
745 | 745 | ||
@@ -1381,7 +1381,7 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on) | |||
1381 | regmap_write(rt5645->regmap, RT5645_PR_BASE + | 1381 | regmap_write(rt5645->regmap, RT5645_PR_BASE + |
1382 | RT5645_MAMP_INT_REG2, 0xfc00); | 1382 | RT5645_MAMP_INT_REG2, 0xfc00); |
1383 | snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140); | 1383 | snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140); |
1384 | mdelay(5); | 1384 | msleep(40); |
1385 | rt5645->hp_on = true; | 1385 | rt5645->hp_on = true; |
1386 | } else { | 1386 | } else { |
1387 | /* depop parameters */ | 1387 | /* depop parameters */ |
@@ -2829,13 +2829,12 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert) | |||
2829 | snd_soc_dapm_sync(dapm); | 2829 | snd_soc_dapm_sync(dapm); |
2830 | rt5645->jack_type = SND_JACK_HEADPHONE; | 2830 | rt5645->jack_type = SND_JACK_HEADPHONE; |
2831 | } | 2831 | } |
2832 | |||
2833 | snd_soc_update_bits(codec, RT5645_CHARGE_PUMP, 0x0300, 0x0200); | ||
2834 | snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d); | ||
2835 | snd_soc_write(codec, RT5645_DEPOP_M1, 0x0001); | ||
2836 | } else { /* jack out */ | 2832 | } else { /* jack out */ |
2837 | rt5645->jack_type = 0; | 2833 | rt5645->jack_type = 0; |
2838 | 2834 | ||
2835 | regmap_update_bits(rt5645->regmap, RT5645_HP_VOL, | ||
2836 | RT5645_L_MUTE | RT5645_R_MUTE, | ||
2837 | RT5645_L_MUTE | RT5645_R_MUTE); | ||
2839 | regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2, | 2838 | regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2, |
2840 | RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD); | 2839 | RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD); |
2841 | regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1, | 2840 | regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1, |
@@ -2880,8 +2879,6 @@ int rt5645_set_jack_detect(struct snd_soc_codec *codec, | |||
2880 | rt5645->en_button_func = true; | 2879 | rt5645->en_button_func = true; |
2881 | regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, | 2880 | regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, |
2882 | RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ); | 2881 | RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ); |
2883 | regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1, | ||
2884 | RT5645_HP_CB_MASK, RT5645_HP_CB_PU); | ||
2885 | regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1, | 2882 | regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1, |
2886 | RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL); | 2883 | RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL); |
2887 | } | 2884 | } |
@@ -3205,6 +3202,13 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = { | |||
3205 | DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), | 3202 | DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), |
3206 | }, | 3203 | }, |
3207 | }, | 3204 | }, |
3205 | { | ||
3206 | .ident = "Google Ultima", | ||
3207 | .callback = strago_quirk_cb, | ||
3208 | .matches = { | ||
3209 | DMI_MATCH(DMI_PRODUCT_NAME, "Ultima"), | ||
3210 | }, | ||
3211 | }, | ||
3208 | { } | 3212 | { } |
3209 | }; | 3213 | }; |
3210 | 3214 | ||
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h index 0e4cfc6ac649..8c964cfb120d 100644 --- a/sound/soc/codecs/rt5645.h +++ b/sound/soc/codecs/rt5645.h | |||
@@ -39,8 +39,8 @@ | |||
39 | #define RT5645_STO1_ADC_DIG_VOL 0x1c | 39 | #define RT5645_STO1_ADC_DIG_VOL 0x1c |
40 | #define RT5645_MONO_ADC_DIG_VOL 0x1d | 40 | #define RT5645_MONO_ADC_DIG_VOL 0x1d |
41 | #define RT5645_ADC_BST_VOL1 0x1e | 41 | #define RT5645_ADC_BST_VOL1 0x1e |
42 | /* Mixer - D-D */ | ||
43 | #define RT5645_ADC_BST_VOL2 0x20 | 42 | #define RT5645_ADC_BST_VOL2 0x20 |
43 | /* Mixer - D-D */ | ||
44 | #define RT5645_STO1_ADC_MIXER 0x27 | 44 | #define RT5645_STO1_ADC_MIXER 0x27 |
45 | #define RT5645_MONO_ADC_MIXER 0x28 | 45 | #define RT5645_MONO_ADC_MIXER 0x28 |
46 | #define RT5645_AD_DA_MIXER 0x29 | 46 | #define RT5645_AD_DA_MIXER 0x29 |
@@ -315,12 +315,14 @@ | |||
315 | #define RT5645_STO1_ADC_R_BST_SFT 12 | 315 | #define RT5645_STO1_ADC_R_BST_SFT 12 |
316 | #define RT5645_STO1_ADC_COMP_MASK (0x3 << 10) | 316 | #define RT5645_STO1_ADC_COMP_MASK (0x3 << 10) |
317 | #define RT5645_STO1_ADC_COMP_SFT 10 | 317 | #define RT5645_STO1_ADC_COMP_SFT 10 |
318 | #define RT5645_STO2_ADC_L_BST_MASK (0x3 << 8) | 318 | |
319 | #define RT5645_STO2_ADC_L_BST_SFT 8 | 319 | /* ADC Boost Volume Control (0x20) */ |
320 | #define RT5645_STO2_ADC_R_BST_MASK (0x3 << 6) | 320 | #define RT5645_MONO_ADC_L_BST_MASK (0x3 << 14) |
321 | #define RT5645_STO2_ADC_R_BST_SFT 6 | 321 | #define RT5645_MONO_ADC_L_BST_SFT 14 |
322 | #define RT5645_STO2_ADC_COMP_MASK (0x3 << 4) | 322 | #define RT5645_MONO_ADC_R_BST_MASK (0x3 << 12) |
323 | #define RT5645_STO2_ADC_COMP_SFT 4 | 323 | #define RT5645_MONO_ADC_R_BST_SFT 12 |
324 | #define RT5645_MONO_ADC_COMP_MASK (0x3 << 10) | ||
325 | #define RT5645_MONO_ADC_COMP_SFT 10 | ||
324 | 326 | ||
325 | /* Stereo2 ADC Mixer Control (0x26) */ | 327 | /* Stereo2 ADC Mixer Control (0x26) */ |
326 | #define RT5645_STO2_ADC_SRC_MASK (0x1 << 15) | 328 | #define RT5645_STO2_ADC_SRC_MASK (0x1 << 15) |
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index bfda25ef0dd4..f540f82b1f27 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c | |||
@@ -1376,8 +1376,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec) | |||
1376 | sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT); | 1376 | sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT); |
1377 | 1377 | ||
1378 | snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL, | 1378 | snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL, |
1379 | SGTL5000_BIAS_R_MASK, | 1379 | SGTL5000_BIAS_VOLT_MASK, |
1380 | sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT); | 1380 | sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT); |
1381 | /* | 1381 | /* |
1382 | * disable DAP | 1382 | * disable DAP |
1383 | * TODO: | 1383 | * TODO: |
@@ -1549,7 +1549,7 @@ static int sgtl5000_i2c_probe(struct i2c_client *client, | |||
1549 | else { | 1549 | else { |
1550 | sgtl5000->micbias_voltage = 0; | 1550 | sgtl5000->micbias_voltage = 0; |
1551 | dev_err(&client->dev, | 1551 | dev_err(&client->dev, |
1552 | "Unsuitable MicBias resistor\n"); | 1552 | "Unsuitable MicBias voltage\n"); |
1553 | } | 1553 | } |
1554 | } else { | 1554 | } else { |
1555 | sgtl5000->micbias_voltage = 0; | 1555 | sgtl5000->micbias_voltage = 0; |
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c index e3a0bca28bcf..cc1d3981fa4b 100644 --- a/sound/soc/codecs/tas2552.c +++ b/sound/soc/codecs/tas2552.c | |||
@@ -549,7 +549,7 @@ static struct snd_soc_dai_driver tas2552_dai[] = { | |||
549 | /* | 549 | /* |
550 | * DAC digital volumes. From -7 to 24 dB in 1 dB steps | 550 | * DAC digital volumes. From -7 to 24 dB in 1 dB steps |
551 | */ | 551 | */ |
552 | static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0); | 552 | static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0); |
553 | 553 | ||
554 | static const char * const tas2552_din_source_select[] = { | 554 | static const char * const tas2552_din_source_select[] = { |
555 | "Muted", | 555 | "Muted", |
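The tas2552 change above is a units fix: DECLARE_TLV_DB_SCALE() takes its minimum and step arguments in 0.01 dB units, so a -7 dB floor must be written as -700, not -7. A small illustration (the control name is made up):

#include <sound/tlv.h>

/* min and step are in 0.01 dB: -700 = -7 dB, 100 = 1 dB per register step */
static const DECLARE_TLV_DB_SCALE(example_dac_tlv, -700, 100, 0);
/* register value v maps to (-700 + 100 * v) / 100 dB,
 * e.g. v = 0 -> -7 dB, v = 7 -> 0 dB, v = 31 -> +24 dB
 */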
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index 1a82b19b2644..8739126a1f6f 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c | |||
@@ -1509,14 +1509,17 @@ static int aic3x_init(struct snd_soc_codec *codec) | |||
1509 | snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL); | 1509 | snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL); |
1510 | snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL); | 1510 | snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL); |
1511 | 1511 | ||
1512 | /* Line2 to HP Bypass default volume, disconnect from Output Mixer */ | 1512 | /* On tlv320aic3104, these registers are reserved and must not be written */ |
1513 | snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); | 1513 | if (aic3x->model != AIC3X_MODEL_3104) { |
1514 | snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); | 1514 | /* Line2 to HP Bypass default volume, disconnect from Output Mixer */ |
1515 | snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); | 1515 | snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); |
1516 | snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); | 1516 | snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); |
1517 | /* Line2 Line Out default volume, disconnect from Output Mixer */ | 1517 | snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); |
1518 | snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); | 1518 | snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); |
1519 | snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); | 1519 | /* Line2 Line Out default volume, disconnect from Output Mixer */ |
1520 | snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); | ||
1521 | snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); | ||
1522 | } | ||
1520 | 1523 | ||
1521 | switch (aic3x->model) { | 1524 | switch (aic3x->model) { |
1522 | case AIC3X_MODEL_3X: | 1525 | case AIC3X_MODEL_3X: |
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c index f2c6ad4b8fde..581ec1502228 100644 --- a/sound/soc/codecs/wm0010.c +++ b/sound/soc/codecs/wm0010.c | |||
@@ -577,7 +577,6 @@ static int wm0010_boot(struct snd_soc_codec *codec) | |||
577 | struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec); | 577 | struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec); |
578 | unsigned long flags; | 578 | unsigned long flags; |
579 | int ret; | 579 | int ret; |
580 | const struct firmware *fw; | ||
581 | struct spi_message m; | 580 | struct spi_message m; |
582 | struct spi_transfer t; | 581 | struct spi_transfer t; |
583 | struct dfw_pllrec pll_rec; | 582 | struct dfw_pllrec pll_rec; |
@@ -623,14 +622,6 @@ static int wm0010_boot(struct snd_soc_codec *codec) | |||
623 | wm0010->state = WM0010_OUT_OF_RESET; | 622 | wm0010->state = WM0010_OUT_OF_RESET; |
624 | spin_unlock_irqrestore(&wm0010->irq_lock, flags); | 623 | spin_unlock_irqrestore(&wm0010->irq_lock, flags); |
625 | 624 | ||
626 | /* First the bootloader */ | ||
627 | ret = request_firmware(&fw, "wm0010_stage2.bin", codec->dev); | ||
628 | if (ret != 0) { | ||
629 | dev_err(codec->dev, "Failed to request stage2 loader: %d\n", | ||
630 | ret); | ||
631 | goto abort; | ||
632 | } | ||
633 | |||
634 | if (!wait_for_completion_timeout(&wm0010->boot_completion, | 625 | if (!wait_for_completion_timeout(&wm0010->boot_completion, |
635 | msecs_to_jiffies(20))) | 626 | msecs_to_jiffies(20))) |
636 | dev_err(codec->dev, "Failed to get interrupt from DSP\n"); | 627 | dev_err(codec->dev, "Failed to get interrupt from DSP\n"); |
@@ -673,7 +664,7 @@ static int wm0010_boot(struct snd_soc_codec *codec) | |||
673 | 664 | ||
674 | img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA); | 665 | img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA); |
675 | if (!img_swap) | 666 | if (!img_swap) |
676 | goto abort; | 667 | goto abort_out; |
677 | 668 | ||
678 | /* We need to re-order for 0010 */ | 669 | /* We need to re-order for 0010 */ |
679 | byte_swap_64((u64 *)&pll_rec, img_swap, len); | 670 | byte_swap_64((u64 *)&pll_rec, img_swap, len); |
@@ -688,16 +679,16 @@ static int wm0010_boot(struct snd_soc_codec *codec) | |||
688 | spi_message_add_tail(&t, &m); | 679 | spi_message_add_tail(&t, &m); |
689 | 680 | ||
690 | ret = spi_sync(spi, &m); | 681 | ret = spi_sync(spi, &m); |
691 | if (ret != 0) { | 682 | if (ret) { |
692 | dev_err(codec->dev, "First PLL write failed: %d\n", ret); | 683 | dev_err(codec->dev, "First PLL write failed: %d\n", ret); |
693 | goto abort; | 684 | goto abort_swap; |
694 | } | 685 | } |
695 | 686 | ||
696 | /* Use a second send of the message to get the return status */ | 687 | /* Use a second send of the message to get the return status */ |
697 | ret = spi_sync(spi, &m); | 688 | ret = spi_sync(spi, &m); |
698 | if (ret != 0) { | 689 | if (ret) { |
699 | dev_err(codec->dev, "Second PLL write failed: %d\n", ret); | 690 | dev_err(codec->dev, "Second PLL write failed: %d\n", ret); |
700 | goto abort; | 691 | goto abort_swap; |
701 | } | 692 | } |
702 | 693 | ||
703 | p = (u32 *)out; | 694 | p = (u32 *)out; |
@@ -730,6 +721,10 @@ static int wm0010_boot(struct snd_soc_codec *codec) | |||
730 | 721 | ||
731 | return 0; | 722 | return 0; |
732 | 723 | ||
724 | abort_swap: | ||
725 | kfree(img_swap); | ||
726 | abort_out: | ||
727 | kfree(out); | ||
733 | abort: | 728 | abort: |
734 | /* Put the chip back into reset */ | 729 | /* Put the chip back into reset */ |
735 | wm0010_halt(codec); | 730 | wm0010_halt(codec); |
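The wm0010 error-path rework above replaces a single catch-all label with one label per live allocation, so each buffer is freed exactly once however far the function got before failing. A minimal sketch of that layered-goto cleanup idiom (do_transfer() and all other names here are hypothetical):

#include <linux/device.h>
#include <linux/slab.h>

static int example_boot(struct device *dev, size_t len)
{
	void *out, *img_swap;
	int ret;

	out = kzalloc(len, GFP_KERNEL | GFP_DMA);
	if (!out)
		return -ENOMEM;

	img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
	if (!img_swap) {
		ret = -ENOMEM;
		goto err_out;		/* only 'out' has been allocated */
	}

	ret = do_transfer(dev, out, img_swap, len);	/* hypothetical helper */
	if (ret)
		goto err_swap;		/* both buffers must be freed */

	kfree(img_swap);
	kfree(out);
	return 0;

err_swap:
	kfree(img_swap);
err_out:
	kfree(out);
	return ret;
}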
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index e3b7d0c57411..dbd88408861a 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c | |||
@@ -211,28 +211,38 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol, | |||
211 | return wm8960_set_deemph(codec); | 211 | return wm8960_set_deemph(codec); |
212 | } | 212 | } |
213 | 213 | ||
214 | static const DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 50, 0); | 214 | static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1); |
215 | static const DECLARE_TLV_DB_SCALE(dac_tlv, -12700, 50, 1); | 215 | static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1725, 75, 0); |
216 | static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1); | ||
216 | static const DECLARE_TLV_DB_SCALE(bypass_tlv, -2100, 300, 0); | 217 | static const DECLARE_TLV_DB_SCALE(bypass_tlv, -2100, 300, 0); |
217 | static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); | 218 | static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); |
218 | static const DECLARE_TLV_DB_SCALE(boost_tlv, -1200, 300, 1); | 219 | static const DECLARE_TLV_DB_SCALE(lineinboost_tlv, -1500, 300, 1); |
220 | static const unsigned int micboost_tlv[] = { | ||
221 | TLV_DB_RANGE_HEAD(2), | ||
222 | 0, 1, TLV_DB_SCALE_ITEM(0, 1300, 0), | ||
223 | 2, 3, TLV_DB_SCALE_ITEM(2000, 900, 0), | ||
224 | }; | ||
219 | 225 | ||
220 | static const struct snd_kcontrol_new wm8960_snd_controls[] = { | 226 | static const struct snd_kcontrol_new wm8960_snd_controls[] = { |
221 | SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL, | 227 | SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL, |
222 | 0, 63, 0, adc_tlv), | 228 | 0, 63, 0, inpga_tlv), |
223 | SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL, | 229 | SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL, |
224 | 6, 1, 0), | 230 | 6, 1, 0), |
225 | SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL, | 231 | SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL, |
226 | 7, 1, 0), | 232 | 7, 1, 0), |
227 | 233 | ||
228 | SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume", | 234 | SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume", |
229 | WM8960_INBMIX1, 4, 7, 0, boost_tlv), | 235 | WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv), |
230 | SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume", | 236 | SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume", |
231 | WM8960_INBMIX1, 1, 7, 0, boost_tlv), | 237 | WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv), |
232 | SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume", | 238 | SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume", |
233 | WM8960_INBMIX2, 4, 7, 0, boost_tlv), | 239 | WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv), |
234 | SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume", | 240 | SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume", |
235 | WM8960_INBMIX2, 1, 7, 0, boost_tlv), | 241 | WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv), |
242 | SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume", | ||
243 | WM8960_RINPATH, 4, 3, 0, micboost_tlv), | ||
244 | SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT1 Volume", | ||
245 | WM8960_LINPATH, 4, 3, 0, micboost_tlv), | ||
236 | 246 | ||
237 | SOC_DOUBLE_R_TLV("Playback Volume", WM8960_LDAC, WM8960_RDAC, | 247 | SOC_DOUBLE_R_TLV("Playback Volume", WM8960_LDAC, WM8960_RDAC, |
238 | 0, 255, 0, dac_tlv), | 248 | 0, 255, 0, dac_tlv), |
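One way to read the micboost_tlv table added above (all TLV values are in 0.01 dB): register values 0 and 1 fall in the first range, values 2 and 3 in the second, so the four selectable LINPUT1/RINPUT1 boost gains decode as shown below.

/* micboost_tlv, decoded (illustration only):
 *   value 0..1 -> TLV_DB_SCALE_ITEM(0, 1300, 0)   ->   0 dB, +13 dB
 *   value 2..3 -> TLV_DB_SCALE_ITEM(2000, 900, 0) -> +20 dB, +29 dB
 * i.e. the boost control offers 0 / +13 / +20 / +29 dB.
 */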
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index b4eb975da981..39ebd7bf4f53 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c | |||
@@ -2944,7 +2944,8 @@ static int wm8962_mute(struct snd_soc_dai *dai, int mute) | |||
2944 | WM8962_DAC_MUTE, val); | 2944 | WM8962_DAC_MUTE, val); |
2945 | } | 2945 | } |
2946 | 2946 | ||
2947 | #define WM8962_RATES SNDRV_PCM_RATE_8000_96000 | 2947 | #define WM8962_RATES (SNDRV_PCM_RATE_8000_48000 |\ |
2948 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) | ||
2948 | 2949 | ||
2949 | #define WM8962_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ | 2950 | #define WM8962_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ |
2950 | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) | 2951 | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) |
@@ -3759,7 +3760,7 @@ static int wm8962_i2c_probe(struct i2c_client *i2c, | |||
3759 | ret = snd_soc_register_codec(&i2c->dev, | 3760 | ret = snd_soc_register_codec(&i2c->dev, |
3760 | &soc_codec_dev_wm8962, &wm8962_dai, 1); | 3761 | &soc_codec_dev_wm8962, &wm8962_dai, 1); |
3761 | if (ret < 0) | 3762 | if (ret < 0) |
3762 | goto err_enable; | 3763 | goto err_pm_runtime; |
3763 | 3764 | ||
3764 | regcache_cache_only(wm8962->regmap, true); | 3765 | regcache_cache_only(wm8962->regmap, true); |
3765 | 3766 | ||
@@ -3768,6 +3769,8 @@ static int wm8962_i2c_probe(struct i2c_client *i2c, | |||
3768 | 3769 | ||
3769 | return 0; | 3770 | return 0; |
3770 | 3771 | ||
3772 | err_pm_runtime: | ||
3773 | pm_runtime_disable(&i2c->dev); | ||
3771 | err_enable: | 3774 | err_enable: |
3772 | regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); | 3775 | regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); |
3773 | err: | 3776 | err: |
@@ -3777,6 +3780,7 @@ err: | |||
3777 | static int wm8962_i2c_remove(struct i2c_client *client) | 3780 | static int wm8962_i2c_remove(struct i2c_client *client) |
3778 | { | 3781 | { |
3779 | snd_soc_unregister_codec(&client->dev); | 3782 | snd_soc_unregister_codec(&client->dev); |
3783 | pm_runtime_disable(&client->dev); | ||
3780 | return 0; | 3784 | return 0; |
3781 | } | 3785 | } |
3782 | 3786 | ||
@@ -3804,6 +3808,8 @@ static int wm8962_runtime_resume(struct device *dev) | |||
3804 | 3808 | ||
3805 | wm8962_reset(wm8962); | 3809 | wm8962_reset(wm8962); |
3806 | 3810 | ||
3811 | regcache_mark_dirty(wm8962->regmap); | ||
3812 | |||
3807 | /* SYSCLK defaults to on; make sure it is off so we can safely | 3813 | /* SYSCLK defaults to on; make sure it is off so we can safely |
3808 | * write to registers if the device is declocked. | 3814 | * write to registers if the device is declocked. |
3809 | */ | 3815 | */ |
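The wm8962 runtime-resume hunk above marks the register cache dirty immediately after resetting the chip, so that a later cache sync rewrites every register that differs from the hardware defaults. A hedged sketch of the usual regmap sequence this belongs to (function names are illustrative; the real driver issues the corresponding regcache_sync() elsewhere):

#include <linux/regmap.h>

static void example_power_off(struct regmap *map)
{
	regcache_cache_only(map, true);	/* writes land in the cache only */
}

static void example_power_on(struct regmap *map)
{
	regcache_cache_only(map, false);
	/* the chip was reset to its defaults while off, so tell regmap
	 * that cached values no longer match the hardware */
	regcache_mark_dirty(map);
	regcache_sync(map);		/* rewrite non-default registers */
}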
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index add6bb99661d..7d45d98a861f 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c | |||
@@ -663,7 +663,7 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream, | |||
663 | u8 rx_ser = 0; | 663 | u8 rx_ser = 0; |
664 | u8 slots = mcasp->tdm_slots; | 664 | u8 slots = mcasp->tdm_slots; |
665 | u8 max_active_serializers = (channels + slots - 1) / slots; | 665 | u8 max_active_serializers = (channels + slots - 1) / slots; |
666 | int active_serializers, numevt, n; | 666 | int active_serializers, numevt; |
667 | u32 reg; | 667 | u32 reg; |
668 | /* Default configuration */ | 668 | /* Default configuration */ |
669 | if (mcasp->version < MCASP_VERSION_3) | 669 | if (mcasp->version < MCASP_VERSION_3) |
@@ -745,9 +745,8 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream, | |||
745 | * The number of words for numevt need to be in steps of active | 745 | * The number of words for numevt need to be in steps of active |
746 | * serializers. | 746 | * serializers. |
747 | */ | 747 | */ |
748 | n = numevt % active_serializers; | 748 | numevt = (numevt / active_serializers) * active_serializers; |
749 | if (n) | 749 | |
750 | numevt += (active_serializers - n); | ||
751 | while (period_words % numevt && numevt > 0) | 750 | while (period_words % numevt && numevt > 0) |
752 | numevt -= active_serializers; | 751 | numevt -= active_serializers; |
753 | if (numevt <= 0) | 752 | if (numevt <= 0) |
@@ -1299,6 +1298,7 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = { | |||
1299 | .ops = &davinci_mcasp_dai_ops, | 1298 | .ops = &davinci_mcasp_dai_ops, |
1300 | 1299 | ||
1301 | .symmetric_samplebits = 1, | 1300 | .symmetric_samplebits = 1, |
1301 | .symmetric_rates = 1, | ||
1302 | }, | 1302 | }, |
1303 | { | 1303 | { |
1304 | .name = "davinci-mcasp.1", | 1304 | .name = "davinci-mcasp.1", |
@@ -1685,7 +1685,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev) | |||
1685 | 1685 | ||
1686 | irq = platform_get_irq_byname(pdev, "common"); | 1686 | irq = platform_get_irq_byname(pdev, "common"); |
1687 | if (irq >= 0) { | 1687 | if (irq >= 0) { |
1688 | irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common\n", | 1688 | irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common", |
1689 | dev_name(&pdev->dev)); | 1689 | dev_name(&pdev->dev)); |
1690 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, | 1690 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, |
1691 | davinci_mcasp_common_irq_handler, | 1691 | davinci_mcasp_common_irq_handler, |
@@ -1702,7 +1702,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev) | |||
1702 | 1702 | ||
1703 | irq = platform_get_irq_byname(pdev, "rx"); | 1703 | irq = platform_get_irq_byname(pdev, "rx"); |
1704 | if (irq >= 0) { | 1704 | if (irq >= 0) { |
1705 | irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx\n", | 1705 | irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx", |
1706 | dev_name(&pdev->dev)); | 1706 | dev_name(&pdev->dev)); |
1707 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, | 1707 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, |
1708 | davinci_mcasp_rx_irq_handler, | 1708 | davinci_mcasp_rx_irq_handler, |
@@ -1717,7 +1717,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev) | |||
1717 | 1717 | ||
1718 | irq = platform_get_irq_byname(pdev, "tx"); | 1718 | irq = platform_get_irq_byname(pdev, "tx"); |
1719 | if (irq >= 0) { | 1719 | if (irq >= 0) { |
1720 | irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx\n", | 1720 | irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx", |
1721 | dev_name(&pdev->dev)); | 1721 | dev_name(&pdev->dev)); |
1722 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, | 1722 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, |
1723 | davinci_mcasp_tx_irq_handler, | 1723 | davinci_mcasp_tx_irq_handler, |
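The mcasp_common_hw_param change above swaps a round-up for a round-down when forcing numevt to a multiple of the active serializers. A worked example with illustrative numbers, assuming active_serializers = 3 and an initial numevt of 64:

/* old code: n = 64 % 3 = 1; numevt += (3 - 1)  ->  numevt = 66 (rounded up)
 * new code: numevt = (64 / 3) * 3              ->  numevt = 63 (rounded down)
 * In both versions the following "period_words % numevt" loop then only
 * adjusts numevt further in steps of active_serializers.
 */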
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c index a3e97b46b64e..ba34252b7bba 100644 --- a/sound/soc/dwc/designware_i2s.c +++ b/sound/soc/dwc/designware_i2s.c | |||
@@ -131,23 +131,32 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream) | |||
131 | 131 | ||
132 | if (stream == SNDRV_PCM_STREAM_PLAYBACK) { | 132 | if (stream == SNDRV_PCM_STREAM_PLAYBACK) { |
133 | for (i = 0; i < 4; i++) | 133 | for (i = 0; i < 4; i++) |
134 | i2s_write_reg(dev->i2s_base, TOR(i), 0); | 134 | i2s_read_reg(dev->i2s_base, TOR(i)); |
135 | } else { | 135 | } else { |
136 | for (i = 0; i < 4; i++) | 136 | for (i = 0; i < 4; i++) |
137 | i2s_write_reg(dev->i2s_base, ROR(i), 0); | 137 | i2s_read_reg(dev->i2s_base, ROR(i)); |
138 | } | 138 | } |
139 | } | 139 | } |
140 | 140 | ||
141 | static void i2s_start(struct dw_i2s_dev *dev, | 141 | static void i2s_start(struct dw_i2s_dev *dev, |
142 | struct snd_pcm_substream *substream) | 142 | struct snd_pcm_substream *substream) |
143 | { | 143 | { |
144 | 144 | u32 i, irq; | |
145 | i2s_write_reg(dev->i2s_base, IER, 1); | 145 | i2s_write_reg(dev->i2s_base, IER, 1); |
146 | 146 | ||
147 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 147 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
148 | for (i = 0; i < 4; i++) { | ||
149 | irq = i2s_read_reg(dev->i2s_base, IMR(i)); | ||
150 | i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30); | ||
151 | } | ||
148 | i2s_write_reg(dev->i2s_base, ITER, 1); | 152 | i2s_write_reg(dev->i2s_base, ITER, 1); |
149 | else | 153 | } else { |
154 | for (i = 0; i < 4; i++) { | ||
155 | irq = i2s_read_reg(dev->i2s_base, IMR(i)); | ||
156 | i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03); | ||
157 | } | ||
150 | i2s_write_reg(dev->i2s_base, IRER, 1); | 158 | i2s_write_reg(dev->i2s_base, IRER, 1); |
159 | } | ||
151 | 160 | ||
152 | i2s_write_reg(dev->i2s_base, CER, 1); | 161 | i2s_write_reg(dev->i2s_base, CER, 1); |
153 | } | 162 | } |
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c index 5aeb6ed4827e..96f55ae75c71 100644 --- a/sound/soc/fsl/fsl-asoc-card.c +++ b/sound/soc/fsl/fsl-asoc-card.c | |||
@@ -488,7 +488,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) | |||
488 | priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; | 488 | priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; |
489 | } else { | 489 | } else { |
490 | dev_err(&pdev->dev, "unknown Device Tree compatible\n"); | 490 | dev_err(&pdev->dev, "unknown Device Tree compatible\n"); |
491 | return -EINVAL; | 491 | ret = -EINVAL; |
492 | goto asrc_fail; | ||
492 | } | 493 | } |
493 | 494 | ||
494 | /* Common settings for corresponding Freescale CPU DAI driver */ | 495 | /* Common settings for corresponding Freescale CPU DAI driver */ |
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 8ec6fb208ea0..37c5cd4d0e59 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c | |||
@@ -249,7 +249,8 @@ MODULE_DEVICE_TABLE(of, fsl_ssi_ids); | |||
249 | 249 | ||
250 | static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private) | 250 | static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private) |
251 | { | 251 | { |
252 | return !!(ssi_private->dai_fmt & SND_SOC_DAIFMT_AC97); | 252 | return (ssi_private->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) == |
253 | SND_SOC_DAIFMT_AC97; | ||
253 | } | 254 | } |
254 | 255 | ||
255 | static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private) | 256 | static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private) |
@@ -947,7 +948,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev, | |||
947 | CCSR_SSI_SCR_TCH_EN); | 948 | CCSR_SSI_SCR_TCH_EN); |
948 | } | 949 | } |
949 | 950 | ||
950 | if (fmt & SND_SOC_DAIFMT_AC97) | 951 | if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_AC97) |
951 | fsl_ssi_setup_ac97(ssi_private); | 952 | fsl_ssi_setup_ac97(ssi_private); |
952 | 953 | ||
953 | return 0; | 954 | return 0; |
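The fsl_ssi change above matters because the SND_SOC_DAIFMT_* formats are consecutive enum values packed into SND_SOC_DAIFMT_FORMAT_MASK, not independent flag bits, so a plain bitwise AND against SND_SOC_DAIFMT_AC97 can also be non-zero for unrelated formats that happen to share bits with it. A minimal sketch of the safe test:

#include <sound/soc-dai.h>

/* compare the masked format field against the enum value instead of ANDing */
static bool fmt_is_ac97(unsigned int fmt)
{
	return (fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_AC97;
}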
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 48b2d24dd1f0..b95132e2f9dc 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c | |||
@@ -95,7 +95,8 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) | |||
95 | switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { | 95 | switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { |
96 | case SND_SOC_DAIFMT_I2S: | 96 | case SND_SOC_DAIFMT_I2S: |
97 | /* data on rising edge of bclk, frame low 1clk before data */ | 97 | /* data on rising edge of bclk, frame low 1clk before data */ |
98 | strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0; | 98 | strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSI | |
99 | SSI_STCR_TEFS; | ||
99 | scr |= SSI_SCR_NET; | 100 | scr |= SSI_SCR_NET; |
100 | if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) { | 101 | if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) { |
101 | scr &= ~SSI_I2S_MODE_MASK; | 102 | scr &= ~SSI_I2S_MODE_MASK; |
@@ -104,33 +105,31 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) | |||
104 | break; | 105 | break; |
105 | case SND_SOC_DAIFMT_LEFT_J: | 106 | case SND_SOC_DAIFMT_LEFT_J: |
106 | /* data on rising edge of bclk, frame high with data */ | 107 | /* data on rising edge of bclk, frame high with data */ |
107 | strcr |= SSI_STCR_TXBIT0; | 108 | strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP; |
108 | break; | 109 | break; |
109 | case SND_SOC_DAIFMT_DSP_B: | 110 | case SND_SOC_DAIFMT_DSP_B: |
110 | /* data on rising edge of bclk, frame high with data */ | 111 | /* data on rising edge of bclk, frame high with data */ |
111 | strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0; | 112 | strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL; |
112 | break; | 113 | break; |
113 | case SND_SOC_DAIFMT_DSP_A: | 114 | case SND_SOC_DAIFMT_DSP_A: |
114 | /* data on rising edge of bclk, frame high 1clk before data */ | 115 | /* data on rising edge of bclk, frame high 1clk before data */ |
115 | strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS; | 116 | strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL | |
117 | SSI_STCR_TEFS; | ||
116 | break; | 118 | break; |
117 | } | 119 | } |
118 | 120 | ||
119 | /* DAI clock inversion */ | 121 | /* DAI clock inversion */ |
120 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { | 122 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { |
121 | case SND_SOC_DAIFMT_IB_IF: | 123 | case SND_SOC_DAIFMT_IB_IF: |
122 | strcr |= SSI_STCR_TFSI; | 124 | strcr ^= SSI_STCR_TSCKP | SSI_STCR_TFSI; |
123 | strcr &= ~SSI_STCR_TSCKP; | ||
124 | break; | 125 | break; |
125 | case SND_SOC_DAIFMT_IB_NF: | 126 | case SND_SOC_DAIFMT_IB_NF: |
126 | strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI); | 127 | strcr ^= SSI_STCR_TSCKP; |
127 | break; | 128 | break; |
128 | case SND_SOC_DAIFMT_NB_IF: | 129 | case SND_SOC_DAIFMT_NB_IF: |
129 | strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP; | 130 | strcr ^= SSI_STCR_TFSI; |
130 | break; | 131 | break; |
131 | case SND_SOC_DAIFMT_NB_NF: | 132 | case SND_SOC_DAIFMT_NB_NF: |
132 | strcr &= ~SSI_STCR_TFSI; | ||
133 | strcr |= SSI_STCR_TSCKP; | ||
134 | break; | 133 | break; |
135 | } | 134 | } |
136 | 135 | ||
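In the reworked imx_ssi_set_dai_fmt() above, every format branch now starts from the same default polarity (SSI_STCR_TSCKP set) and the clock-inversion switch XORs bits instead of assigning absolute values, so an inversion is applied relative to whatever the chosen format configured. A worked example (illustrative only):

/* DSP_A first sets:
 *     strcr = SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL | SSI_STCR_TEFS;
 * SND_SOC_DAIFMT_IB_NF (inverted bit clock, normal frame) then does:
 *     strcr ^= SSI_STCR_TSCKP;
 * which flips only the bit-clock polarity and leaves the frame-sync bits
 * (TFSL, TEFS) chosen by the format untouched, unlike the old code's
 * unconditional clears and sets.
 */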
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c index f6efa9d4acad..b27f25f70730 100644 --- a/sound/soc/intel/haswell/sst-haswell-ipc.c +++ b/sound/soc/intel/haswell/sst-haswell-ipc.c | |||
@@ -302,6 +302,10 @@ struct sst_hsw { | |||
302 | struct sst_hsw_ipc_dx_reply dx; | 302 | struct sst_hsw_ipc_dx_reply dx; |
303 | void *dx_context; | 303 | void *dx_context; |
304 | dma_addr_t dx_context_paddr; | 304 | dma_addr_t dx_context_paddr; |
305 | enum sst_hsw_device_id dx_dev; | ||
306 | enum sst_hsw_device_mclk dx_mclk; | ||
307 | enum sst_hsw_device_mode dx_mode; | ||
308 | u32 dx_clock_divider; | ||
305 | 309 | ||
306 | /* boot */ | 310 | /* boot */ |
307 | wait_queue_head_t boot_wait; | 311 | wait_queue_head_t boot_wait; |
@@ -1400,10 +1404,10 @@ int sst_hsw_device_set_config(struct sst_hsw *hsw, | |||
1400 | 1404 | ||
1401 | trace_ipc_request("set device config", dev); | 1405 | trace_ipc_request("set device config", dev); |
1402 | 1406 | ||
1403 | config.ssp_interface = dev; | 1407 | hsw->dx_dev = config.ssp_interface = dev; |
1404 | config.clock_frequency = mclk; | 1408 | hsw->dx_mclk = config.clock_frequency = mclk; |
1405 | config.mode = mode; | 1409 | hsw->dx_mode = config.mode = mode; |
1406 | config.clock_divider = clock_divider; | 1410 | hsw->dx_clock_divider = config.clock_divider = clock_divider; |
1407 | if (mode == SST_HSW_DEVICE_TDM_CLOCK_MASTER) | 1411 | if (mode == SST_HSW_DEVICE_TDM_CLOCK_MASTER) |
1408 | config.channels = 4; | 1412 | config.channels = 4; |
1409 | else | 1413 | else |
@@ -1704,10 +1708,10 @@ int sst_hsw_dsp_runtime_resume(struct sst_hsw *hsw) | |||
1704 | return -EIO; | 1708 | return -EIO; |
1705 | } | 1709 | } |
1706 | 1710 | ||
1707 | /* Set ADSP SSP port settings */ | 1711 | /* Set ADSP SSP port settings - sadly the FW does not store SSP port |
1708 | ret = sst_hsw_device_set_config(hsw, SST_HSW_DEVICE_SSP_0, | 1712 | settings as part of the PM context. */ |
1709 | SST_HSW_DEVICE_MCLK_FREQ_24_MHZ, | 1713 | ret = sst_hsw_device_set_config(hsw, hsw->dx_dev, hsw->dx_mclk, |
1710 | SST_HSW_DEVICE_CLOCK_MASTER, 9); | 1714 | hsw->dx_mode, hsw->dx_clock_divider); |
1711 | if (ret < 0) | 1715 | if (ret < 0) |
1712 | dev_err(dev, "error: SSP re-initialization failed\n"); | 1716 | dev_err(dev, "error: SSP re-initialization failed\n"); |
1713 | 1717 | ||
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c index d190fe017559..f5baf3c38863 100644 --- a/sound/soc/mediatek/mtk-afe-pcm.c +++ b/sound/soc/mediatek/mtk-afe-pcm.c | |||
@@ -549,6 +549,23 @@ static int mtk_afe_dais_startup(struct snd_pcm_substream *substream, | |||
549 | memif->substream = substream; | 549 | memif->substream = substream; |
550 | 550 | ||
551 | snd_soc_set_runtime_hwparams(substream, &mtk_afe_hardware); | 551 | snd_soc_set_runtime_hwparams(substream, &mtk_afe_hardware); |
552 | |||
553 | /* | ||
554 | * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be | ||
555 | * smaller than period_size due to AFE's internal buffer. | ||
556 | * This easily leads to overrun when avail_min is period_size. | ||
557 | * One more period can hold the possible unread buffer. | ||
558 | */ | ||
559 | if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { | ||
560 | ret = snd_pcm_hw_constraint_minmax(runtime, | ||
561 | SNDRV_PCM_HW_PARAM_PERIODS, | ||
562 | 3, | ||
563 | mtk_afe_hardware.periods_max); | ||
564 | if (ret < 0) { | ||
565 | dev_err(afe->dev, "hw_constraint_minmax failed\n"); | ||
566 | return ret; | ||
567 | } | ||
568 | } | ||
552 | ret = snd_pcm_hw_constraint_integer(runtime, | 569 | ret = snd_pcm_hw_constraint_integer(runtime, |
553 | SNDRV_PCM_HW_PARAM_PERIODS); | 570 | SNDRV_PCM_HW_PARAM_PERIODS); |
554 | if (ret < 0) | 571 | if (ret < 0) |
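A worked illustration of the capture overrun described in the comment above, with made-up numbers: with period_size = 1024 frames, two periods (a 2048-frame ring) and avail_min = period_size, the first period IRQ may find hw_ptr at, say, 1020 because the AFE still holds a few frames internally, so the reader is not woken; by the next IRQ the hardware has nearly filled the whole 2048-frame ring while nothing was read, and the stream overruns. Requiring at least three periods leaves one spare period to absorb the frames that were not yet visible at the IRQ, which is what the snd_pcm_hw_constraint_minmax() call enforces.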
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig index 39cea80846c3..f2bf8661dd21 100644 --- a/sound/soc/pxa/Kconfig +++ b/sound/soc/pxa/Kconfig | |||
@@ -1,7 +1,6 @@ | |||
1 | config SND_PXA2XX_SOC | 1 | config SND_PXA2XX_SOC |
2 | tristate "SoC Audio for the Intel PXA2xx chip" | 2 | tristate "SoC Audio for the Intel PXA2xx chip" |
3 | depends on ARCH_PXA | 3 | depends on ARCH_PXA |
4 | select SND_ARM | ||
5 | select SND_PXA2XX_LIB | 4 | select SND_PXA2XX_LIB |
6 | help | 5 | help |
7 | Say Y or M if you want to add support for codecs attached to | 6 | Say Y or M if you want to add support for codecs attached to |
@@ -25,7 +24,6 @@ config SND_PXA2XX_AC97 | |||
25 | config SND_PXA2XX_SOC_AC97 | 24 | config SND_PXA2XX_SOC_AC97 |
26 | tristate | 25 | tristate |
27 | select AC97_BUS | 26 | select AC97_BUS |
28 | select SND_ARM | ||
29 | select SND_PXA2XX_LIB_AC97 | 27 | select SND_PXA2XX_LIB_AC97 |
30 | select SND_SOC_AC97_BUS | 28 | select SND_SOC_AC97_BUS |
31 | 29 | ||
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c index 1f6054650991..9e4b04e0fbd1 100644 --- a/sound/soc/pxa/pxa2xx-ac97.c +++ b/sound/soc/pxa/pxa2xx-ac97.c | |||
@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = { | |||
49 | .reset = pxa2xx_ac97_cold_reset, | 49 | .reset = pxa2xx_ac97_cold_reset, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12; | 52 | static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11; |
53 | static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = { | 53 | static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = { |
54 | .addr = __PREG(PCDR), | 54 | .addr = __PREG(PCDR), |
55 | .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | 55 | .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, |
@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = { | |||
57 | .filter_data = &pxa2xx_ac97_pcm_stereo_in_req, | 57 | .filter_data = &pxa2xx_ac97_pcm_stereo_in_req, |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11; | 60 | static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12; |
61 | static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = { | 61 | static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = { |
62 | .addr = __PREG(PCDR), | 62 | .addr = __PREG(PCDR), |
63 | .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | 63 | .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index f4bf21a5539b..ff8bda471b25 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -3501,7 +3501,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, | |||
3501 | 3501 | ||
3502 | default: | 3502 | default: |
3503 | WARN(1, "Unknown event %d\n", event); | 3503 | WARN(1, "Unknown event %d\n", event); |
3504 | return -EINVAL; | 3504 | ret = -EINVAL; |
3505 | } | 3505 | } |
3506 | 3506 | ||
3507 | out: | 3507 | out: |
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c index 362c69ac1d6c..53dd085d3ee2 100644 --- a/sound/soc/soc-utils.c +++ b/sound/soc/soc-utils.c | |||
@@ -101,6 +101,15 @@ static struct snd_soc_codec_driver dummy_codec; | |||
101 | SNDRV_PCM_FMTBIT_S32_LE | \ | 101 | SNDRV_PCM_FMTBIT_S32_LE | \ |
102 | SNDRV_PCM_FMTBIT_U32_LE | \ | 102 | SNDRV_PCM_FMTBIT_U32_LE | \ |
103 | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE) | 103 | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE) |
104 | /* | ||
105 | * The dummy CODEC is only meant to be used in situations where there is no | ||
106 | * actual hardware. | ||
107 | * | ||
108 | * If there is actual hardware, even if it has no control bus, the | ||
109 | * hardware will still have constraints such as supported sample rates | ||
110 | * which should be modelled, and its data flow graph should also be | ||
111 | * modelled using DAPM. | ||
112 | */ | ||
104 | static struct snd_soc_dai_driver dummy_dai = { | 113 | static struct snd_soc_dai_driver dummy_dai = { |
105 | .name = "snd-soc-dummy-dai", | 114 | .name = "snd-soc-dummy-dai", |
106 | .playback = { | 115 | .playback = { |
diff --git a/sound/soc/spear/Kconfig b/sound/soc/spear/Kconfig index 0a53053495f3..4fb91412ebec 100644 --- a/sound/soc/spear/Kconfig +++ b/sound/soc/spear/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config SND_SPEAR_SOC | 1 | config SND_SPEAR_SOC |
2 | tristate | 2 | tristate |
3 | select SND_DMAENGINE_PCM | 3 | select SND_SOC_GENERIC_DMAENGINE_PCM |
4 | 4 | ||
5 | config SND_SPEAR_SPDIF_OUT | 5 | config SND_SPEAR_SPDIF_OUT |
6 | tristate | 6 | tristate |
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c index f6eefe1b8f8f..843f037a317d 100644 --- a/sound/soc/sti/uniperif_player.c +++ b/sound/soc/sti/uniperif_player.c | |||
@@ -989,8 +989,8 @@ static int uni_player_parse_dt(struct platform_device *pdev, | |||
989 | if (!info) | 989 | if (!info) |
990 | return -ENOMEM; | 990 | return -ENOMEM; |
991 | 991 | ||
992 | of_property_read_u32(pnode, "version", &player->ver); | 992 | if (of_property_read_u32(pnode, "version", &player->ver) || |
993 | if (player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { | 993 | player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { |
994 | dev_err(dev, "Unknown uniperipheral version "); | 994 | dev_err(dev, "Unknown uniperipheral version "); |
995 | return -EINVAL; | 995 | return -EINVAL; |
996 | } | 996 | } |
@@ -998,10 +998,16 @@ static int uni_player_parse_dt(struct platform_device *pdev, | |||
998 | if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) | 998 | if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) |
999 | info->underflow_enabled = 1; | 999 | info->underflow_enabled = 1; |
1000 | 1000 | ||
1001 | of_property_read_u32(pnode, "uniperiph-id", &info->id); | 1001 | if (of_property_read_u32(pnode, "uniperiph-id", &info->id)) { |
1002 | dev_err(dev, "uniperipheral id not defined"); | ||
1003 | return -EINVAL; | ||
1004 | } | ||
1002 | 1005 | ||
1003 | /* Read the device mode property */ | 1006 | /* Read the device mode property */ |
1004 | of_property_read_string(pnode, "mode", &mode); | 1007 | if (of_property_read_string(pnode, "mode", &mode)) { |
1008 | dev_err(dev, "uniperipheral mode not defined"); | ||
1009 | return -EINVAL; | ||
1010 | } | ||
1005 | 1011 | ||
1006 | if (strcasecmp(mode, "hdmi") == 0) | 1012 | if (strcasecmp(mode, "hdmi") == 0) |
1007 | info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI; | 1013 | info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI; |
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c index c502626f339b..f791239a3087 100644 --- a/sound/soc/sti/uniperif_reader.c +++ b/sound/soc/sti/uniperif_reader.c | |||
@@ -316,7 +316,11 @@ static int uni_reader_parse_dt(struct platform_device *pdev, | |||
316 | if (!info) | 316 | if (!info) |
317 | return -ENOMEM; | 317 | return -ENOMEM; |
318 | 318 | ||
319 | of_property_read_u32(node, "version", &reader->ver); | 319 | if (of_property_read_u32(node, "version", &reader->ver) || |
320 | reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { | ||
321 | dev_err(&pdev->dev, "Unknown uniperipheral version "); | ||
322 | return -EINVAL; | ||
323 | } | ||
320 | 324 | ||
321 | /* Save the info structure */ | 325 | /* Save the info structure */ |
322 | reader->info = info; | 326 | reader->info = info; |
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c index 82e350e9501c..ac75816ada7c 100644 --- a/sound/synth/emux/emux_oss.c +++ b/sound/synth/emux/emux_oss.c | |||
@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu) | |||
69 | struct snd_seq_oss_reg *arg; | 69 | struct snd_seq_oss_reg *arg; |
70 | struct snd_seq_device *dev; | 70 | struct snd_seq_device *dev; |
71 | 71 | ||
72 | if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS, | 72 | /* using device#1 here for avoiding conflicts with OPL3 */ |
73 | if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS, | ||
73 | sizeof(struct snd_seq_oss_reg), &dev) < 0) | 74 | sizeof(struct snd_seq_oss_reg), &dev) < 0) |
74 | return; | 75 | return; |
75 | 76 | ||
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 2975632d51e2..c8fe6d177119 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature | |||
@@ -41,6 +41,7 @@ FEATURE_TESTS ?= \ | |||
41 | libelf-getphdrnum \ | 41 | libelf-getphdrnum \ |
42 | libelf-mmap \ | 42 | libelf-mmap \ |
43 | libnuma \ | 43 | libnuma \ |
44 | numa_num_possible_cpus \ | ||
44 | libperl \ | 45 | libperl \ |
45 | libpython \ | 46 | libpython \ |
46 | libpython-version \ | 47 | libpython-version \ |
@@ -51,7 +52,8 @@ FEATURE_TESTS ?= \ | |||
51 | timerfd \ | 52 | timerfd \ |
52 | libdw-dwarf-unwind \ | 53 | libdw-dwarf-unwind \ |
53 | zlib \ | 54 | zlib \ |
54 | lzma | 55 | lzma \ |
56 | get_cpuid | ||
55 | 57 | ||
56 | FEATURE_DISPLAY ?= \ | 58 | FEATURE_DISPLAY ?= \ |
57 | dwarf \ | 59 | dwarf \ |
@@ -61,13 +63,15 @@ FEATURE_DISPLAY ?= \ | |||
61 | libbfd \ | 63 | libbfd \ |
62 | libelf \ | 64 | libelf \ |
63 | libnuma \ | 65 | libnuma \ |
66 | numa_num_possible_cpus \ | ||
64 | libperl \ | 67 | libperl \ |
65 | libpython \ | 68 | libpython \ |
66 | libslang \ | 69 | libslang \ |
67 | libunwind \ | 70 | libunwind \ |
68 | libdw-dwarf-unwind \ | 71 | libdw-dwarf-unwind \ |
69 | zlib \ | 72 | zlib \ |
70 | lzma | 73 | lzma \ |
74 | get_cpuid | ||
71 | 75 | ||
72 | # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. | 76 | # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. |
73 | # If in the future we need per-feature checks/flags for features not | 77 | # If in the future we need per-feature checks/flags for features not |
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 74ca42093d70..e43a2971bf56 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile | |||
@@ -19,6 +19,7 @@ FILES= \ | |||
19 | test-libelf-getphdrnum.bin \ | 19 | test-libelf-getphdrnum.bin \ |
20 | test-libelf-mmap.bin \ | 20 | test-libelf-mmap.bin \ |
21 | test-libnuma.bin \ | 21 | test-libnuma.bin \ |
22 | test-numa_num_possible_cpus.bin \ | ||
22 | test-libperl.bin \ | 23 | test-libperl.bin \ |
23 | test-libpython.bin \ | 24 | test-libpython.bin \ |
24 | test-libpython-version.bin \ | 25 | test-libpython-version.bin \ |
@@ -34,7 +35,8 @@ FILES= \ | |||
34 | test-compile-x32.bin \ | 35 | test-compile-x32.bin \ |
35 | test-zlib.bin \ | 36 | test-zlib.bin \ |
36 | test-lzma.bin \ | 37 | test-lzma.bin \ |
37 | test-bpf.bin | 38 | test-bpf.bin \ |
39 | test-get_cpuid.bin | ||
38 | 40 | ||
39 | CC := $(CROSS_COMPILE)gcc -MD | 41 | CC := $(CROSS_COMPILE)gcc -MD |
40 | PKG_CONFIG := $(CROSS_COMPILE)pkg-config | 42 | PKG_CONFIG := $(CROSS_COMPILE)pkg-config |
@@ -87,6 +89,9 @@ test-libelf-getphdrnum.bin: | |||
87 | test-libnuma.bin: | 89 | test-libnuma.bin: |
88 | $(BUILD) -lnuma | 90 | $(BUILD) -lnuma |
89 | 91 | ||
92 | test-numa_num_possible_cpus.bin: | ||
93 | $(BUILD) -lnuma | ||
94 | |||
90 | test-libunwind.bin: | 95 | test-libunwind.bin: |
91 | $(BUILD) -lelf | 96 | $(BUILD) -lelf |
92 | 97 | ||
@@ -162,6 +167,9 @@ test-zlib.bin: | |||
162 | test-lzma.bin: | 167 | test-lzma.bin: |
163 | $(BUILD) -llzma | 168 | $(BUILD) -llzma |
164 | 169 | ||
170 | test-get_cpuid.bin: | ||
171 | $(BUILD) | ||
172 | |||
165 | test-bpf.bin: | 173 | test-bpf.bin: |
166 | $(BUILD) | 174 | $(BUILD) |
167 | 175 | ||
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 84689a67814a..33cf6f20bd4e 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c | |||
@@ -77,6 +77,10 @@ | |||
77 | # include "test-libnuma.c" | 77 | # include "test-libnuma.c" |
78 | #undef main | 78 | #undef main |
79 | 79 | ||
80 | #define main main_test_numa_num_possible_cpus | ||
81 | # include "test-numa_num_possible_cpus.c" | ||
82 | #undef main | ||
83 | |||
80 | #define main main_test_timerfd | 84 | #define main main_test_timerfd |
81 | # include "test-timerfd.c" | 85 | # include "test-timerfd.c" |
82 | #undef main | 86 | #undef main |
@@ -117,6 +121,10 @@ | |||
117 | # include "test-lzma.c" | 121 | # include "test-lzma.c" |
118 | #undef main | 122 | #undef main |
119 | 123 | ||
124 | #define main main_test_get_cpuid | ||
125 | # include "test-get_cpuid.c" | ||
126 | #undef main | ||
127 | |||
120 | int main(int argc, char *argv[]) | 128 | int main(int argc, char *argv[]) |
121 | { | 129 | { |
122 | main_test_libpython(); | 130 | main_test_libpython(); |
@@ -136,6 +144,7 @@ int main(int argc, char *argv[]) | |||
136 | main_test_libbfd(); | 144 | main_test_libbfd(); |
137 | main_test_backtrace(); | 145 | main_test_backtrace(); |
138 | main_test_libnuma(); | 146 | main_test_libnuma(); |
147 | main_test_numa_num_possible_cpus(); | ||
139 | main_test_timerfd(); | 148 | main_test_timerfd(); |
140 | main_test_stackprotector_all(); | 149 | main_test_stackprotector_all(); |
141 | main_test_libdw_dwarf_unwind(); | 150 | main_test_libdw_dwarf_unwind(); |
@@ -143,6 +152,7 @@ int main(int argc, char *argv[]) | |||
143 | main_test_zlib(); | 152 | main_test_zlib(); |
144 | main_test_pthread_attr_setaffinity_np(); | 153 | main_test_pthread_attr_setaffinity_np(); |
145 | main_test_lzma(); | 154 | main_test_lzma(); |
155 | main_test_get_cpuid(); | ||
146 | 156 | ||
147 | return 0; | 157 | return 0; |
148 | } | 158 | } |
diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c new file mode 100644 index 000000000000..d7a2c407130d --- /dev/null +++ b/tools/build/feature/test-get_cpuid.c | |||
@@ -0,0 +1,7 @@ | |||
1 | #include <cpuid.h> | ||
2 | |||
3 | int main(void) | ||
4 | { | ||
5 | unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; | ||
6 | return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx); | ||
7 | } | ||
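
The feature probe above only needs to compile and link; a non-zero feature-get_cpuid result is what the perf config Makefile further down uses to gate auxtrace/Intel PT support. As a rough illustration of what the probed builtin provides (this snippet is illustrative only and not part of the patch), the same call can pull the maximum CPUID leaf and the vendor string:

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

/* Illustration only: exercise __get_cpuid() the same way the feature
 * test does, and decode leaf 0 (max basic leaf + vendor string). */
int main(void)
{
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
        char vendor[13] = { 0 };

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
                return 1;       /* CPUID not usable */

        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        printf("max basic leaf %u, vendor %s\n", eax, vendor);
        return 0;
}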
diff --git a/tools/build/feature/test-numa_num_possible_cpus.c b/tools/build/feature/test-numa_num_possible_cpus.c new file mode 100644 index 000000000000..2606e94b0659 --- /dev/null +++ b/tools/build/feature/test-numa_num_possible_cpus.c | |||
@@ -0,0 +1,6 @@ | |||
1 | #include <numa.h> | ||
2 | |||
3 | int main(void) | ||
4 | { | ||
5 | return numa_num_possible_cpus(); | ||
6 | } | ||
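
numa_num_possible_cpus() is what the perf config Makefile below keys on to reject libnuma versions older than 2.0.8. A minimal, hedged usage sketch (not part of the patch; build with -lnuma):

#include <numa.h>
#include <stdio.h>

int main(void)
{
        if (numa_available() < 0) {
                fprintf(stderr, "libnuma: NUMA not available\n");
                return 1;
        }
        /* The call the feature test probes: an upper bound for per-CPU arrays. */
        printf("possible CPUs:    %d\n", numa_num_possible_cpus());
        printf("configured nodes: %d\n", numa_num_configured_nodes());
        return 0;
}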
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 4d885934b919..cf42b090477b 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c | |||
@@ -3795,7 +3795,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, | |||
3795 | struct format_field *field; | 3795 | struct format_field *field; |
3796 | struct printk_map *printk; | 3796 | struct printk_map *printk; |
3797 | long long val, fval; | 3797 | long long val, fval; |
3798 | unsigned long addr; | 3798 | unsigned long long addr; |
3799 | char *str; | 3799 | char *str; |
3800 | unsigned char *hex; | 3800 | unsigned char *hex; |
3801 | int print; | 3801 | int print; |
@@ -3828,13 +3828,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, | |||
3828 | */ | 3828 | */ |
3829 | if (!(field->flags & FIELD_IS_ARRAY) && | 3829 | if (!(field->flags & FIELD_IS_ARRAY) && |
3830 | field->size == pevent->long_size) { | 3830 | field->size == pevent->long_size) { |
3831 | addr = *(unsigned long *)(data + field->offset); | 3831 | |
3832 | /* Handle heterogeneous recording and processing | ||
3833 | * architectures | ||
3834 | * | ||
3835 | * CASE I: | ||
3836 | * Traces recorded on 32-bit devices (32-bit | ||
3837 | * addressing) and processed on 64-bit devices: | ||
3838 | * In this case, only 32 bits should be read. | ||
3839 | * | ||
3840 | * CASE II: | ||
3841 | * Traces recorded on 64-bit devices and processed | ||
3842 | * on 32-bit devices: | ||
3843 | * In this case, 64 bits must be read. | ||
3844 | */ | ||
3845 | addr = (pevent->long_size == 8) ? | ||
3846 | *(unsigned long long *)(data + field->offset) : | ||
3847 | (unsigned long long)*(unsigned int *)(data + field->offset); | ||
3848 | |||
3832 | /* Check if it matches a print format */ | 3849 | /* Check if it matches a print format */ |
3833 | printk = find_printk(pevent, addr); | 3850 | printk = find_printk(pevent, addr); |
3834 | if (printk) | 3851 | if (printk) |
3835 | trace_seq_puts(s, printk->printk); | 3852 | trace_seq_puts(s, printk->printk); |
3836 | else | 3853 | else |
3837 | trace_seq_printf(s, "%lx", addr); | 3854 | trace_seq_printf(s, "%llx", addr); |
3838 | break; | 3855 | break; |
3839 | } | 3856 | } |
3840 | str = malloc(len + 1); | 3857 | str = malloc(len + 1); |
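
The widened addr together with the long_size check lets a 32-bit host decode traces recorded on a 64-bit machine and vice versa. A standalone sketch of the same idea (hypothetical helper; byte-swapping for cross-endian traces is handled elsewhere in event-parse and is ignored here):

#include <stdint.h>
#include <string.h>

/* Read a pointer-sized field using the *recorded* long size, not the
 * host's, and return it widened to 64 bits so a 32-bit host can still
 * hold a 64-bit trace address. */
static uint64_t read_recorded_long(const void *field, int recorded_long_size)
{
        if (recorded_long_size == 8) {
                uint64_t v;
                memcpy(&v, field, sizeof(v));
                return v;
        } else {
                uint32_t v;
                memcpy(&v, field, sizeof(v));
                return v;
        }
}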
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt index 4a0501d7a3b4..c94c9de3173e 100644 --- a/tools/perf/Documentation/intel-pt.txt +++ b/tools/perf/Documentation/intel-pt.txt | |||
@@ -364,21 +364,6 @@ cyc_thresh Specifies how frequently CYC packets are produced - see cyc | |||
364 | 364 | ||
365 | CYC packets are not requested by default. | 365 | CYC packets are not requested by default. |
366 | 366 | ||
367 | no_force_psb This is a driver option and is not in the IA32_RTIT_CTL MSR. | ||
368 | |||
369 | It stops the driver resetting the byte count to zero whenever | ||
370 | enabling the trace (for example on context switches) which in | ||
371 | turn results in no PSB being forced. However some processors | ||
372 | will produce a PSB anyway. | ||
373 | |||
374 | In any case, there is still a PSB when the trace is enabled for | ||
375 | the first time. | ||
376 | |||
377 | no_force_psb can be used to slightly decrease the trace size but | ||
378 | may make it harder for the decoder to recover from errors. | ||
379 | |||
380 | no_force_psb is not selected by default. | ||
381 | |||
382 | 367 | ||
383 | new snapshot option | 368 | new snapshot option |
384 | ------------------- | 369 | ------------------- |
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index eb51325e8ad9..284a76e04628 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -768,8 +768,8 @@ static int process_exit_event(struct perf_tool *tool, | |||
768 | if (!evsel->attr.sample_id_all) { | 768 | if (!evsel->attr.sample_id_all) { |
769 | sample->cpu = 0; | 769 | sample->cpu = 0; |
770 | sample->time = 0; | 770 | sample->time = 0; |
771 | sample->tid = event->comm.tid; | 771 | sample->tid = event->fork.tid; |
772 | sample->pid = event->comm.pid; | 772 | sample->pid = event->fork.pid; |
773 | } | 773 | } |
774 | print_sample_start(sample, thread, evsel); | 774 | print_sample_start(sample, thread, evsel); |
775 | perf_event__fprintf(event, stdout); | 775 | perf_event__fprintf(event, stdout); |
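
PERF_RECORD_EXIT records carry a fork_event payload, so reading them through the comm_event view of the union compiles but picks up the wrong offsets; the same one-line fix recurs in intel-bts.c and intel-pt.c below. A trimmed sketch of why the offsets clash (struct bodies abbreviated from tools/perf/util/event.h, common header field dropped for brevity):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Abbreviated layouts, minus the shared perf_event_header. */
struct comm_event { uint32_t pid, tid; char comm[16]; };
struct fork_event { uint32_t pid, ppid, tid, ptid; uint64_t time; };

int main(void)
{
        /* comm.tid sits at the same offset as fork.ppid, so decoding an
         * exit (fork-shaped) record via the comm view returns the parent
         * pid where the exiting thread's tid was expected. */
        printf("comm.tid @ %zu, fork.ppid @ %zu, fork.tid @ %zu\n",
               offsetof(struct comm_event, tid),
               offsetof(struct fork_event, ppid),
               offsetof(struct fork_event, tid));
        return 0;
}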
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 827557fc7511..38a08539f4bf 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile | |||
@@ -573,9 +573,14 @@ ifndef NO_LIBNUMA | |||
573 | msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev); | 573 | msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev); |
574 | NO_LIBNUMA := 1 | 574 | NO_LIBNUMA := 1 |
575 | else | 575 | else |
576 | CFLAGS += -DHAVE_LIBNUMA_SUPPORT | 576 | ifeq ($(feature-numa_num_possible_cpus), 0) |
577 | EXTLIBS += -lnuma | 577 | msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8); |
578 | $(call detected,CONFIG_NUMA) | 578 | NO_LIBNUMA := 1 |
579 | else | ||
580 | CFLAGS += -DHAVE_LIBNUMA_SUPPORT | ||
581 | EXTLIBS += -lnuma | ||
582 | $(call detected,CONFIG_NUMA) | ||
583 | endif | ||
579 | endif | 584 | endif |
580 | endif | 585 | endif |
581 | 586 | ||
@@ -621,8 +626,13 @@ ifdef LIBBABELTRACE | |||
621 | endif | 626 | endif |
622 | 627 | ||
623 | ifndef NO_AUXTRACE | 628 | ifndef NO_AUXTRACE |
624 | $(call detected,CONFIG_AUXTRACE) | 629 | ifeq ($(feature-get_cpuid), 0) |
625 | CFLAGS += -DHAVE_AUXTRACE_SUPPORT | 630 | msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); |
631 | NO_AUXTRACE := 1 | ||
632 | else | ||
633 | $(call detected,CONFIG_AUXTRACE) | ||
634 | CFLAGS += -DHAVE_AUXTRACE_SUPPORT | ||
635 | endif | ||
626 | endif | 636 | endif |
627 | 637 | ||
628 | # Among the variables below, these: | 638 | # Among the variables below, these: |
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c index 1aa21c90731b..5b83f56a3b6f 100644 --- a/tools/perf/tests/sw-clock.c +++ b/tools/perf/tests/sw-clock.c | |||
@@ -34,6 +34,8 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) | |||
34 | .disabled = 1, | 34 | .disabled = 1, |
35 | .freq = 1, | 35 | .freq = 1, |
36 | }; | 36 | }; |
37 | struct cpu_map *cpus; | ||
38 | struct thread_map *threads; | ||
37 | 39 | ||
38 | attr.sample_freq = 500; | 40 | attr.sample_freq = 500; |
39 | 41 | ||
@@ -50,14 +52,19 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) | |||
50 | } | 52 | } |
51 | perf_evlist__add(evlist, evsel); | 53 | perf_evlist__add(evlist, evsel); |
52 | 54 | ||
53 | evlist->cpus = cpu_map__dummy_new(); | 55 | cpus = cpu_map__dummy_new(); |
54 | evlist->threads = thread_map__new_by_tid(getpid()); | 56 | threads = thread_map__new_by_tid(getpid()); |
55 | if (!evlist->cpus || !evlist->threads) { | 57 | if (!cpus || !threads) { |
56 | err = -ENOMEM; | 58 | err = -ENOMEM; |
57 | pr_debug("Not enough memory to create thread/cpu maps\n"); | 59 | pr_debug("Not enough memory to create thread/cpu maps\n"); |
58 | goto out_delete_evlist; | 60 | goto out_free_maps; |
59 | } | 61 | } |
60 | 62 | ||
63 | perf_evlist__set_maps(evlist, cpus, threads); | ||
64 | |||
65 | cpus = NULL; | ||
66 | threads = NULL; | ||
67 | |||
61 | if (perf_evlist__open(evlist)) { | 68 | if (perf_evlist__open(evlist)) { |
62 | const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate"; | 69 | const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate"; |
63 | 70 | ||
@@ -107,6 +114,9 @@ next_event: | |||
107 | err = -1; | 114 | err = -1; |
108 | } | 115 | } |
109 | 116 | ||
117 | out_free_maps: | ||
118 | cpu_map__put(cpus); | ||
119 | thread_map__put(threads); | ||
110 | out_delete_evlist: | 120 | out_delete_evlist: |
111 | perf_evlist__delete(evlist); | 121 | perf_evlist__delete(evlist); |
112 | return err; | 122 | return err; |
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c index 3a8fedef83bc..add16385f13e 100644 --- a/tools/perf/tests/task-exit.c +++ b/tools/perf/tests/task-exit.c | |||
@@ -43,6 +43,8 @@ int test__task_exit(void) | |||
43 | }; | 43 | }; |
44 | const char *argv[] = { "true", NULL }; | 44 | const char *argv[] = { "true", NULL }; |
45 | char sbuf[STRERR_BUFSIZE]; | 45 | char sbuf[STRERR_BUFSIZE]; |
46 | struct cpu_map *cpus; | ||
47 | struct thread_map *threads; | ||
46 | 48 | ||
47 | signal(SIGCHLD, sig_handler); | 49 | signal(SIGCHLD, sig_handler); |
48 | 50 | ||
@@ -58,14 +60,19 @@ int test__task_exit(void) | |||
58 | * perf_evlist__prepare_workload we'll fill in the only thread | 60 | * perf_evlist__prepare_workload we'll fill in the only thread |
59 | * we're monitoring, the one forked there. | 61 | * we're monitoring, the one forked there. |
60 | */ | 62 | */ |
61 | evlist->cpus = cpu_map__dummy_new(); | 63 | cpus = cpu_map__dummy_new(); |
62 | evlist->threads = thread_map__new_by_tid(-1); | 64 | threads = thread_map__new_by_tid(-1); |
63 | if (!evlist->cpus || !evlist->threads) { | 65 | if (!cpus || !threads) { |
64 | err = -ENOMEM; | 66 | err = -ENOMEM; |
65 | pr_debug("Not enough memory to create thread/cpu maps\n"); | 67 | pr_debug("Not enough memory to create thread/cpu maps\n"); |
66 | goto out_delete_evlist; | 68 | goto out_free_maps; |
67 | } | 69 | } |
68 | 70 | ||
71 | perf_evlist__set_maps(evlist, cpus, threads); | ||
72 | |||
73 | cpus = NULL; | ||
74 | threads = NULL; | ||
75 | |||
69 | err = perf_evlist__prepare_workload(evlist, &target, argv, false, | 76 | err = perf_evlist__prepare_workload(evlist, &target, argv, false, |
70 | workload_exec_failed_signal); | 77 | workload_exec_failed_signal); |
71 | if (err < 0) { | 78 | if (err < 0) { |
@@ -114,6 +121,9 @@ retry: | |||
114 | err = -1; | 121 | err = -1; |
115 | } | 122 | } |
116 | 123 | ||
124 | out_free_maps: | ||
125 | cpu_map__put(cpus); | ||
126 | thread_map__put(threads); | ||
117 | out_delete_evlist: | 127 | out_delete_evlist: |
118 | perf_evlist__delete(evlist); | 128 | perf_evlist__delete(evlist); |
119 | return err; | 129 | return err; |
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index cf86f2d3a5e7..c04c60d4863c 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c | |||
@@ -1968,7 +1968,8 @@ skip_annotation: | |||
1968 | &options[nr_options], dso); | 1968 | &options[nr_options], dso); |
1969 | nr_options += add_map_opt(browser, &actions[nr_options], | 1969 | nr_options += add_map_opt(browser, &actions[nr_options], |
1970 | &options[nr_options], | 1970 | &options[nr_options], |
1971 | browser->selection->map); | 1971 | browser->selection ? |
1972 | browser->selection->map : NULL); | ||
1972 | 1973 | ||
1973 | /* perf script support */ | 1974 | /* perf script support */ |
1974 | if (browser->he_selection) { | 1975 | if (browser->he_selection) { |
@@ -1976,6 +1977,15 @@ skip_annotation: | |||
1976 | &actions[nr_options], | 1977 | &actions[nr_options], |
1977 | &options[nr_options], | 1978 | &options[nr_options], |
1978 | thread, NULL); | 1979 | thread, NULL); |
1980 | /* | ||
1981 | * Note that browser->selection != NULL | ||
1982 | * when browser->he_selection is not NULL, | ||
1983 | * so we don't need to check browser->selection | ||
1984 | * before fetching browser->selection->sym, as we | ||
1985 | * do before fetching browser->selection->map. | ||
1986 | * | ||
1987 | * See hist_browser__show_entry. | ||
1988 | */ | ||
1979 | nr_options += add_script_opt(browser, | 1989 | nr_options += add_script_opt(browser, |
1980 | &actions[nr_options], | 1990 | &actions[nr_options], |
1981 | &options[nr_options], | 1991 | &options[nr_options], |
diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 349bc96ca1fe..e5f18a288b74 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build | |||
@@ -17,6 +17,7 @@ libperf-y += levenshtein.o | |||
17 | libperf-y += llvm-utils.o | 17 | libperf-y += llvm-utils.o |
18 | libperf-y += parse-options.o | 18 | libperf-y += parse-options.o |
19 | libperf-y += parse-events.o | 19 | libperf-y += parse-events.o |
20 | libperf-y += perf_regs.o | ||
20 | libperf-y += path.o | 21 | libperf-y += path.o |
21 | libperf-y += rbtree.o | 22 | libperf-y += rbtree.o |
22 | libperf-y += bitmap.o | 23 | libperf-y += bitmap.o |
@@ -103,7 +104,6 @@ libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o | |||
103 | 104 | ||
104 | libperf-y += scripting-engines/ | 105 | libperf-y += scripting-engines/ |
105 | 106 | ||
106 | libperf-$(CONFIG_PERF_REGS) += perf_regs.o | ||
107 | libperf-$(CONFIG_ZLIB) += zlib.o | 107 | libperf-$(CONFIG_ZLIB) += zlib.o |
108 | libperf-$(CONFIG_LZMA) += lzma.o | 108 | libperf-$(CONFIG_LZMA) += lzma.o |
109 | 109 | ||
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index d51a5200c8af..c8fc8a258f42 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -124,6 +124,33 @@ void perf_evlist__delete(struct perf_evlist *evlist) | |||
124 | free(evlist); | 124 | free(evlist); |
125 | } | 125 | } |
126 | 126 | ||
127 | static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, | ||
128 | struct perf_evsel *evsel) | ||
129 | { | ||
130 | /* | ||
131 | * We already have cpus for evsel (via PMU sysfs) so | ||
132 | * keep it, if there's no target cpu list defined. | ||
133 | */ | ||
134 | if (!evsel->own_cpus || evlist->has_user_cpus) { | ||
135 | cpu_map__put(evsel->cpus); | ||
136 | evsel->cpus = cpu_map__get(evlist->cpus); | ||
137 | } else if (evsel->cpus != evsel->own_cpus) { | ||
138 | cpu_map__put(evsel->cpus); | ||
139 | evsel->cpus = cpu_map__get(evsel->own_cpus); | ||
140 | } | ||
141 | |||
142 | thread_map__put(evsel->threads); | ||
143 | evsel->threads = thread_map__get(evlist->threads); | ||
144 | } | ||
145 | |||
146 | static void perf_evlist__propagate_maps(struct perf_evlist *evlist) | ||
147 | { | ||
148 | struct perf_evsel *evsel; | ||
149 | |||
150 | evlist__for_each(evlist, evsel) | ||
151 | __perf_evlist__propagate_maps(evlist, evsel); | ||
152 | } | ||
153 | |||
127 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) | 154 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) |
128 | { | 155 | { |
129 | entry->evlist = evlist; | 156 | entry->evlist = evlist; |
@@ -133,18 +160,19 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) | |||
133 | 160 | ||
134 | if (!evlist->nr_entries++) | 161 | if (!evlist->nr_entries++) |
135 | perf_evlist__set_id_pos(evlist); | 162 | perf_evlist__set_id_pos(evlist); |
163 | |||
164 | __perf_evlist__propagate_maps(evlist, entry); | ||
136 | } | 165 | } |
137 | 166 | ||
138 | void perf_evlist__splice_list_tail(struct perf_evlist *evlist, | 167 | void perf_evlist__splice_list_tail(struct perf_evlist *evlist, |
139 | struct list_head *list, | 168 | struct list_head *list) |
140 | int nr_entries) | ||
141 | { | 169 | { |
142 | bool set_id_pos = !evlist->nr_entries; | 170 | struct perf_evsel *evsel, *temp; |
143 | 171 | ||
144 | list_splice_tail(list, &evlist->entries); | 172 | __evlist__for_each_safe(list, temp, evsel) { |
145 | evlist->nr_entries += nr_entries; | 173 | list_del_init(&evsel->node); |
146 | if (set_id_pos) | 174 | perf_evlist__add(evlist, evsel); |
147 | perf_evlist__set_id_pos(evlist); | 175 | } |
148 | } | 176 | } |
149 | 177 | ||
150 | void __perf_evlist__set_leader(struct list_head *list) | 178 | void __perf_evlist__set_leader(struct list_head *list) |
@@ -210,7 +238,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist, | |||
210 | list_add_tail(&evsel->node, &head); | 238 | list_add_tail(&evsel->node, &head); |
211 | } | 239 | } |
212 | 240 | ||
213 | perf_evlist__splice_list_tail(evlist, &head, nr_attrs); | 241 | perf_evlist__splice_list_tail(evlist, &head); |
214 | 242 | ||
215 | return 0; | 243 | return 0; |
216 | 244 | ||
@@ -1103,71 +1131,56 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, | |||
1103 | return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false); | 1131 | return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false); |
1104 | } | 1132 | } |
1105 | 1133 | ||
1106 | static int perf_evlist__propagate_maps(struct perf_evlist *evlist, | ||
1107 | bool has_user_cpus) | ||
1108 | { | ||
1109 | struct perf_evsel *evsel; | ||
1110 | |||
1111 | evlist__for_each(evlist, evsel) { | ||
1112 | /* | ||
1113 | * We already have cpus for evsel (via PMU sysfs) so | ||
1114 | * keep it, if there's no target cpu list defined. | ||
1115 | */ | ||
1116 | if (evsel->cpus && has_user_cpus) | ||
1117 | cpu_map__put(evsel->cpus); | ||
1118 | |||
1119 | if (!evsel->cpus || has_user_cpus) | ||
1120 | evsel->cpus = cpu_map__get(evlist->cpus); | ||
1121 | |||
1122 | evsel->threads = thread_map__get(evlist->threads); | ||
1123 | |||
1124 | if ((evlist->cpus && !evsel->cpus) || | ||
1125 | (evlist->threads && !evsel->threads)) | ||
1126 | return -ENOMEM; | ||
1127 | } | ||
1128 | |||
1129 | return 0; | ||
1130 | } | ||
1131 | |||
1132 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) | 1134 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) |
1133 | { | 1135 | { |
1134 | evlist->threads = thread_map__new_str(target->pid, target->tid, | 1136 | struct cpu_map *cpus; |
1135 | target->uid); | 1137 | struct thread_map *threads; |
1138 | |||
1139 | threads = thread_map__new_str(target->pid, target->tid, target->uid); | ||
1136 | 1140 | ||
1137 | if (evlist->threads == NULL) | 1141 | if (!threads) |
1138 | return -1; | 1142 | return -1; |
1139 | 1143 | ||
1140 | if (target__uses_dummy_map(target)) | 1144 | if (target__uses_dummy_map(target)) |
1141 | evlist->cpus = cpu_map__dummy_new(); | 1145 | cpus = cpu_map__dummy_new(); |
1142 | else | 1146 | else |
1143 | evlist->cpus = cpu_map__new(target->cpu_list); | 1147 | cpus = cpu_map__new(target->cpu_list); |
1144 | 1148 | ||
1145 | if (evlist->cpus == NULL) | 1149 | if (!cpus) |
1146 | goto out_delete_threads; | 1150 | goto out_delete_threads; |
1147 | 1151 | ||
1148 | return perf_evlist__propagate_maps(evlist, !!target->cpu_list); | 1152 | evlist->has_user_cpus = !!target->cpu_list; |
1153 | |||
1154 | perf_evlist__set_maps(evlist, cpus, threads); | ||
1155 | |||
1156 | return 0; | ||
1149 | 1157 | ||
1150 | out_delete_threads: | 1158 | out_delete_threads: |
1151 | thread_map__put(evlist->threads); | 1159 | thread_map__put(threads); |
1152 | evlist->threads = NULL; | ||
1153 | return -1; | 1160 | return -1; |
1154 | } | 1161 | } |
1155 | 1162 | ||
1156 | int perf_evlist__set_maps(struct perf_evlist *evlist, | 1163 | void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, |
1157 | struct cpu_map *cpus, | 1164 | struct thread_map *threads) |
1158 | struct thread_map *threads) | ||
1159 | { | 1165 | { |
1160 | if (evlist->cpus) | 1166 | /* |
1167 | * Allow for the possibility that one or another of the maps isn't being | ||
1168 | * changed i.e. don't put it. Note we are assuming the maps that are | ||
1169 | * being applied are brand new and evlist is taking ownership of the | ||
1170 | * original reference count of 1. If that is not the case it is up to | ||
1171 | * the caller to increase the reference count. | ||
1172 | */ | ||
1173 | if (cpus != evlist->cpus) { | ||
1161 | cpu_map__put(evlist->cpus); | 1174 | cpu_map__put(evlist->cpus); |
1175 | evlist->cpus = cpus; | ||
1176 | } | ||
1162 | 1177 | ||
1163 | evlist->cpus = cpus; | 1178 | if (threads != evlist->threads) { |
1164 | |||
1165 | if (evlist->threads) | ||
1166 | thread_map__put(evlist->threads); | 1179 | thread_map__put(evlist->threads); |
1180 | evlist->threads = threads; | ||
1181 | } | ||
1167 | 1182 | ||
1168 | evlist->threads = threads; | 1183 | perf_evlist__propagate_maps(evlist); |
1169 | |||
1170 | return perf_evlist__propagate_maps(evlist, false); | ||
1171 | } | 1184 | } |
1172 | 1185 | ||
1173 | int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) | 1186 | int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) |
@@ -1387,6 +1400,8 @@ void perf_evlist__close(struct perf_evlist *evlist) | |||
1387 | 1400 | ||
1388 | static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) | 1401 | static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) |
1389 | { | 1402 | { |
1403 | struct cpu_map *cpus; | ||
1404 | struct thread_map *threads; | ||
1390 | int err = -ENOMEM; | 1405 | int err = -ENOMEM; |
1391 | 1406 | ||
1392 | /* | 1407 | /* |
@@ -1398,20 +1413,19 @@ static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) | |||
1398 | * error, and we may not want to do that fallback to a | 1413 | * error, and we may not want to do that fallback to a |
1399 | * default cpu identity map :-\ | 1414 | * default cpu identity map :-\ |
1400 | */ | 1415 | */ |
1401 | evlist->cpus = cpu_map__new(NULL); | 1416 | cpus = cpu_map__new(NULL); |
1402 | if (evlist->cpus == NULL) | 1417 | if (!cpus) |
1403 | goto out; | 1418 | goto out; |
1404 | 1419 | ||
1405 | evlist->threads = thread_map__new_dummy(); | 1420 | threads = thread_map__new_dummy(); |
1406 | if (evlist->threads == NULL) | 1421 | if (!threads) |
1407 | goto out_free_cpus; | 1422 | goto out_put; |
1408 | 1423 | ||
1409 | err = 0; | 1424 | perf_evlist__set_maps(evlist, cpus, threads); |
1410 | out: | 1425 | out: |
1411 | return err; | 1426 | return err; |
1412 | out_free_cpus: | 1427 | out_put: |
1413 | cpu_map__put(evlist->cpus); | 1428 | cpu_map__put(cpus); |
1414 | evlist->cpus = NULL; | ||
1415 | goto out; | 1429 | goto out; |
1416 | } | 1430 | } |
1417 | 1431 | ||
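
With this change perf_evlist__set_maps() returns void, takes over the initial reference of freshly created maps and propagates them to every evsel, so callers only drop their own references on the error path reached before set_maps() runs. A minimal caller sketch under those assumptions (the helper name is made up; the real callers are the updated tests above and perf_evlist__create_maps()):

static int setup_evlist_maps(struct perf_evlist *evlist)
{
        struct cpu_map *cpus = cpu_map__new(NULL);      /* all online CPUs */
        struct thread_map *threads = thread_map__new_by_tid(getpid());

        if (!cpus || !threads) {
                /* set_maps() was never called, so both refs are still ours;
                 * the __put() helpers accept NULL. */
                cpu_map__put(cpus);
                thread_map__put(threads);
                return -ENOMEM;
        }

        /* evlist owns both maps from here on and propagates them to
         * each evsel via __perf_evlist__propagate_maps(). */
        perf_evlist__set_maps(evlist, cpus, threads);
        return 0;
}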
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index b39a6198f4ac..115d8b53c601 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
@@ -42,6 +42,7 @@ struct perf_evlist { | |||
42 | int nr_mmaps; | 42 | int nr_mmaps; |
43 | bool overwrite; | 43 | bool overwrite; |
44 | bool enabled; | 44 | bool enabled; |
45 | bool has_user_cpus; | ||
45 | size_t mmap_len; | 46 | size_t mmap_len; |
46 | int id_pos; | 47 | int id_pos; |
47 | int is_pos; | 48 | int is_pos; |
@@ -155,9 +156,8 @@ int perf_evlist__enable_event_idx(struct perf_evlist *evlist, | |||
155 | void perf_evlist__set_selected(struct perf_evlist *evlist, | 156 | void perf_evlist__set_selected(struct perf_evlist *evlist, |
156 | struct perf_evsel *evsel); | 157 | struct perf_evsel *evsel); |
157 | 158 | ||
158 | int perf_evlist__set_maps(struct perf_evlist *evlist, | 159 | void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, |
159 | struct cpu_map *cpus, | 160 | struct thread_map *threads); |
160 | struct thread_map *threads); | ||
161 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); | 161 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); |
162 | int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel); | 162 | int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel); |
163 | 163 | ||
@@ -179,8 +179,7 @@ bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist); | |||
179 | bool perf_evlist__valid_read_format(struct perf_evlist *evlist); | 179 | bool perf_evlist__valid_read_format(struct perf_evlist *evlist); |
180 | 180 | ||
181 | void perf_evlist__splice_list_tail(struct perf_evlist *evlist, | 181 | void perf_evlist__splice_list_tail(struct perf_evlist *evlist, |
182 | struct list_head *list, | 182 | struct list_head *list); |
183 | int nr_entries); | ||
184 | 183 | ||
185 | static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) | 184 | static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) |
186 | { | 185 | { |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index c53f79123b37..5410483d5219 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -1033,6 +1033,7 @@ void perf_evsel__exit(struct perf_evsel *evsel) | |||
1033 | perf_evsel__free_config_terms(evsel); | 1033 | perf_evsel__free_config_terms(evsel); |
1034 | close_cgroup(evsel->cgrp); | 1034 | close_cgroup(evsel->cgrp); |
1035 | cpu_map__put(evsel->cpus); | 1035 | cpu_map__put(evsel->cpus); |
1036 | cpu_map__put(evsel->own_cpus); | ||
1036 | thread_map__put(evsel->threads); | 1037 | thread_map__put(evsel->threads); |
1037 | zfree(&evsel->group_name); | 1038 | zfree(&evsel->group_name); |
1038 | zfree(&evsel->name); | 1039 | zfree(&evsel->name); |
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 298e6bbca200..ef8925f7211a 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h | |||
@@ -98,6 +98,7 @@ struct perf_evsel { | |||
98 | struct cgroup_sel *cgrp; | 98 | struct cgroup_sel *cgrp; |
99 | void *handler; | 99 | void *handler; |
100 | struct cpu_map *cpus; | 100 | struct cpu_map *cpus; |
101 | struct cpu_map *own_cpus; | ||
101 | struct thread_map *threads; | 102 | struct thread_map *threads; |
102 | unsigned int sample_size; | 103 | unsigned int sample_size; |
103 | int id_pos; | 104 | int id_pos; |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 41814547da15..fce6634aebe2 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -1438,7 +1438,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, | |||
1438 | if (ph->needs_swap) | 1438 | if (ph->needs_swap) |
1439 | nr = bswap_32(nr); | 1439 | nr = bswap_32(nr); |
1440 | 1440 | ||
1441 | ph->env.nr_cpus_online = nr; | 1441 | ph->env.nr_cpus_avail = nr; |
1442 | 1442 | ||
1443 | ret = readn(fd, &nr, sizeof(nr)); | 1443 | ret = readn(fd, &nr, sizeof(nr)); |
1444 | if (ret != sizeof(nr)) | 1444 | if (ret != sizeof(nr)) |
@@ -1447,7 +1447,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, | |||
1447 | if (ph->needs_swap) | 1447 | if (ph->needs_swap) |
1448 | nr = bswap_32(nr); | 1448 | nr = bswap_32(nr); |
1449 | 1449 | ||
1450 | ph->env.nr_cpus_avail = nr; | 1450 | ph->env.nr_cpus_online = nr; |
1451 | return 0; | 1451 | return 0; |
1452 | } | 1452 | } |
1453 | 1453 | ||
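
The writer side (write_nrcpus()) stores the available/configured CPU count first and the online count second; the reader had the two assignments swapped. A hedged sketch of the ordering the fix assumes:

#include <unistd.h>
#include <stdint.h>

/* Mirror of the writer's ordering: the first u32 in HEADER_NRCPUS is
 * the configured (available) CPU count, the second is the online count. */
static void fill_nrcpus(uint32_t *nr_avail, uint32_t *nr_online)
{
        *nr_avail  = (uint32_t)sysconf(_SC_NPROCESSORS_CONF); /* written first  */
        *nr_online = (uint32_t)sysconf(_SC_NPROCESSORS_ONLN); /* written second */
}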
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c index ea768625ab5b..eb0e7f8bf515 100644 --- a/tools/perf/util/intel-bts.c +++ b/tools/perf/util/intel-bts.c | |||
@@ -623,7 +623,7 @@ static int intel_bts_process_event(struct perf_session *session, | |||
623 | if (err) | 623 | if (err) |
624 | return err; | 624 | return err; |
625 | if (event->header.type == PERF_RECORD_EXIT) { | 625 | if (event->header.type == PERF_RECORD_EXIT) { |
626 | err = intel_bts_process_tid_exit(bts, event->comm.tid); | 626 | err = intel_bts_process_tid_exit(bts, event->fork.tid); |
627 | if (err) | 627 | if (err) |
628 | return err; | 628 | return err; |
629 | } | 629 | } |
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index bb41c20e6005..535d86f8e4d1 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c | |||
@@ -1494,7 +1494,7 @@ static int intel_pt_process_event(struct perf_session *session, | |||
1494 | if (pt->timeless_decoding) { | 1494 | if (pt->timeless_decoding) { |
1495 | if (event->header.type == PERF_RECORD_EXIT) { | 1495 | if (event->header.type == PERF_RECORD_EXIT) { |
1496 | err = intel_pt_process_timeless_queues(pt, | 1496 | err = intel_pt_process_timeless_queues(pt, |
1497 | event->comm.tid, | 1497 | event->fork.tid, |
1498 | sample->time); | 1498 | sample->time); |
1499 | } | 1499 | } |
1500 | } else if (timestamp) { | 1500 | } else if (timestamp) { |
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index d826e6f515db..21ed6ee63da9 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -287,8 +287,8 @@ __add_event(struct list_head *list, int *idx, | |||
287 | if (!evsel) | 287 | if (!evsel) |
288 | return NULL; | 288 | return NULL; |
289 | 289 | ||
290 | if (cpus) | 290 | evsel->cpus = cpu_map__get(cpus); |
291 | evsel->cpus = cpu_map__get(cpus); | 291 | evsel->own_cpus = cpu_map__get(cpus); |
292 | 292 | ||
293 | if (name) | 293 | if (name) |
294 | evsel->name = strdup(name); | 294 | evsel->name = strdup(name); |
@@ -1140,10 +1140,9 @@ int parse_events(struct perf_evlist *evlist, const char *str, | |||
1140 | ret = parse_events__scanner(str, &data, PE_START_EVENTS); | 1140 | ret = parse_events__scanner(str, &data, PE_START_EVENTS); |
1141 | perf_pmu__parse_cleanup(); | 1141 | perf_pmu__parse_cleanup(); |
1142 | if (!ret) { | 1142 | if (!ret) { |
1143 | int entries = data.idx - evlist->nr_entries; | ||
1144 | struct perf_evsel *last; | 1143 | struct perf_evsel *last; |
1145 | 1144 | ||
1146 | perf_evlist__splice_list_tail(evlist, &data.list, entries); | 1145 | perf_evlist__splice_list_tail(evlist, &data.list); |
1147 | evlist->nr_groups += data.nr_groups; | 1146 | evlist->nr_groups += data.nr_groups; |
1148 | last = perf_evlist__last(evlist); | 1147 | last = perf_evlist__last(evlist); |
1149 | last->cmdline_group_boundary = true; | 1148 | last->cmdline_group_boundary = true; |
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 591905a02b92..9cd70819c795 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y | |||
@@ -255,7 +255,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc | |||
255 | list_add_tail(&term->list, head); | 255 | list_add_tail(&term->list, head); |
256 | 256 | ||
257 | ALLOC_LIST(list); | 257 | ALLOC_LIST(list); |
258 | ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head)); | 258 | ABORT_ON(parse_events_add_pmu(data, list, "cpu", head)); |
259 | parse_events__free_terms(head); | 259 | parse_events__free_terms(head); |
260 | $$ = list; | 260 | $$ = list; |
261 | } | 261 | } |
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c index 885e8ac83997..6b8eb13e14e4 100644 --- a/tools/perf/util/perf_regs.c +++ b/tools/perf/util/perf_regs.c | |||
@@ -6,6 +6,7 @@ const struct sample_reg __weak sample_reg_masks[] = { | |||
6 | SMPL_REG_END | 6 | SMPL_REG_END |
7 | }; | 7 | }; |
8 | 8 | ||
9 | #ifdef HAVE_PERF_REGS_SUPPORT | ||
9 | int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) | 10 | int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) |
10 | { | 11 | { |
11 | int i, idx = 0; | 12 | int i, idx = 0; |
@@ -29,3 +30,4 @@ out: | |||
29 | *valp = regs->cache_regs[id]; | 30 | *valp = regs->cache_regs[id]; |
30 | return 0; | 31 | return 0; |
31 | } | 32 | } |
33 | #endif | ||
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index 2984dcc54d67..679d6e493962 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define __PERF_REGS_H | 2 | #define __PERF_REGS_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/compiler.h> | ||
5 | 6 | ||
6 | struct regs_dump; | 7 | struct regs_dump; |
7 | 8 | ||
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index eb5f18b75402..c6f9af78f6f5 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -270,12 +270,13 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso) | |||
270 | int ret = 0; | 270 | int ret = 0; |
271 | 271 | ||
272 | if (module) { | 272 | if (module) { |
273 | list_for_each_entry(dso, &host_machine->dsos.head, node) { | 273 | char module_name[128]; |
274 | if (!dso->kernel) | 274 | |
275 | continue; | 275 | snprintf(module_name, sizeof(module_name), "[%s]", module); |
276 | if (strncmp(dso->short_name + 1, module, | 276 | map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name); |
277 | dso->short_name_len - 2) == 0) | 277 | if (map) { |
278 | goto found; | 278 | dso = map->dso; |
279 | goto found; | ||
279 | } | 280 | } |
280 | pr_debug("Failed to find module %s.\n", module); | 281 | pr_debug("Failed to find module %s.\n", module); |
281 | return -ENOENT; | 282 | return -ENOENT; |
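
Rather than walking every kernel dso and prefix-matching short names, the module dso is now found through the kernel map groups using the "[modname]" map name. A condensed sketch of the lookup shape (the helper name is invented; the surrounding error handling stays as in the hunk above):

/* Condensed form of the new lookup: kernel module maps are registered
 * under the name "[modname]", e.g. "[ext4]". */
static struct dso *module_dso(struct machine *host, const char *module)
{
        char name[128];
        struct map *map;

        snprintf(name, sizeof(name), "[%s]", module);
        map = map_groups__find_by_name(&host->kmaps, MAP__FUNCTION, name);
        return map ? map->dso : NULL;
}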
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 8a4537ee9bc3..fc3f7c922f99 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -1580,7 +1580,10 @@ static int __perf_session__process_events(struct perf_session *session, | |||
1580 | file_offset = page_offset; | 1580 | file_offset = page_offset; |
1581 | head = data_offset - page_offset; | 1581 | head = data_offset - page_offset; |
1582 | 1582 | ||
1583 | if (data_size && (data_offset + data_size < file_size)) | 1583 | if (data_size == 0) |
1584 | goto out; | ||
1585 | |||
1586 | if (data_offset + data_size < file_size) | ||
1584 | file_size = data_offset + data_size; | 1587 | file_size = data_offset + data_size; |
1585 | 1588 | ||
1586 | ui_progress__init(&prog, file_size, "Processing events..."); | 1589 | ui_progress__init(&prog, file_size, "Processing events..."); |
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 415c359de465..2d065d065b67 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c | |||
@@ -196,7 +196,8 @@ static void zero_per_pkg(struct perf_evsel *counter) | |||
196 | memset(counter->per_pkg_mask, 0, MAX_NR_CPUS); | 196 | memset(counter->per_pkg_mask, 0, MAX_NR_CPUS); |
197 | } | 197 | } |
198 | 198 | ||
199 | static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) | 199 | static int check_per_pkg(struct perf_evsel *counter, |
200 | struct perf_counts_values *vals, int cpu, bool *skip) | ||
200 | { | 201 | { |
201 | unsigned long *mask = counter->per_pkg_mask; | 202 | unsigned long *mask = counter->per_pkg_mask; |
202 | struct cpu_map *cpus = perf_evsel__cpus(counter); | 203 | struct cpu_map *cpus = perf_evsel__cpus(counter); |
@@ -218,6 +219,17 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) | |||
218 | counter->per_pkg_mask = mask; | 219 | counter->per_pkg_mask = mask; |
219 | } | 220 | } |
220 | 221 | ||
222 | /* | ||
223 | * we do not consider an event that has not run as a good | ||
224 | * instance to mark a package as used (skip=1). Otherwise | ||
225 | * we may run into a situation where the first CPU in a package | ||
226 | * is not running anything, yet the second is, and this function | ||
227 | * would mark the package as used after the first CPU and would | ||
228 | * not read the values from the second CPU. | ||
229 | */ | ||
230 | if (!(vals->run && vals->ena)) | ||
231 | return 0; | ||
232 | |||
221 | s = cpu_map__get_socket(cpus, cpu); | 233 | s = cpu_map__get_socket(cpus, cpu); |
222 | if (s < 0) | 234 | if (s < 0) |
223 | return -1; | 235 | return -1; |
@@ -235,7 +247,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel | |||
235 | static struct perf_counts_values zero; | 247 | static struct perf_counts_values zero; |
236 | bool skip = false; | 248 | bool skip = false; |
237 | 249 | ||
238 | if (check_per_pkg(evsel, cpu, &skip)) { | 250 | if (check_per_pkg(evsel, count, cpu, &skip)) { |
239 | pr_err("failed to read per-pkg counter\n"); | 251 | pr_err("failed to read per-pkg counter\n"); |
240 | return -1; | 252 | return -1; |
241 | } | 253 | } |
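
The added run/ena check means a CPU only claims its package once its counter actually ran there, so a halted first CPU no longer hides the reading from a running second CPU on the same socket. A standalone sketch of the rule (names are illustrative, not the perf API):

#include <stdbool.h>
#include <stdint.h>

struct counts { uint64_t val, ena, run; };

/* Return true when this CPU's per-package value should be skipped. */
static bool skip_per_pkg_value(const struct counts *vals, bool *pkg_used)
{
        if (!(vals->run && vals->ena))
                return false;   /* counter never ran: don't claim the package */
        if (*pkg_used)
                return true;    /* package already reported by another CPU */
        *pkg_used = true;
        return false;
}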
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 53bb5f59ec58..475d88d0a1c9 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c | |||
@@ -38,7 +38,7 @@ static inline char *bfd_demangle(void __maybe_unused *v, | |||
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT | 40 | #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT |
41 | int elf_getphdrnum(Elf *elf, size_t *dst) | 41 | static int elf_getphdrnum(Elf *elf, size_t *dst) |
42 | { | 42 | { |
43 | GElf_Ehdr gehdr; | 43 | GElf_Ehdr gehdr; |
44 | GElf_Ehdr *ehdr; | 44 | GElf_Ehdr *ehdr; |
@@ -1271,8 +1271,6 @@ out_close: | |||
1271 | static int kcore__init(struct kcore *kcore, char *filename, int elfclass, | 1271 | static int kcore__init(struct kcore *kcore, char *filename, int elfclass, |
1272 | bool temp) | 1272 | bool temp) |
1273 | { | 1273 | { |
1274 | GElf_Ehdr *ehdr; | ||
1275 | |||
1276 | kcore->elfclass = elfclass; | 1274 | kcore->elfclass = elfclass; |
1277 | 1275 | ||
1278 | if (temp) | 1276 | if (temp) |
@@ -1289,9 +1287,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass, | |||
1289 | if (!gelf_newehdr(kcore->elf, elfclass)) | 1287 | if (!gelf_newehdr(kcore->elf, elfclass)) |
1290 | goto out_end; | 1288 | goto out_end; |
1291 | 1289 | ||
1292 | ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); | 1290 | memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr)); |
1293 | if (!ehdr) | ||
1294 | goto out_end; | ||
1295 | 1291 | ||
1296 | return 0; | 1292 | return 0; |
1297 | 1293 | ||
@@ -1348,23 +1344,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) | |||
1348 | static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, | 1344 | static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, |
1349 | u64 addr, u64 len) | 1345 | u64 addr, u64 len) |
1350 | { | 1346 | { |
1351 | GElf_Phdr gphdr; | 1347 | GElf_Phdr phdr = { |
1352 | GElf_Phdr *phdr; | 1348 | .p_type = PT_LOAD, |
1353 | 1349 | .p_flags = PF_R | PF_W | PF_X, | |
1354 | phdr = gelf_getphdr(kcore->elf, idx, &gphdr); | 1350 | .p_offset = offset, |
1355 | if (!phdr) | 1351 | .p_vaddr = addr, |
1356 | return -1; | 1352 | .p_paddr = 0, |
1357 | 1353 | .p_filesz = len, | |
1358 | phdr->p_type = PT_LOAD; | 1354 | .p_memsz = len, |
1359 | phdr->p_flags = PF_R | PF_W | PF_X; | 1355 | .p_align = page_size, |
1360 | phdr->p_offset = offset; | 1356 | }; |
1361 | phdr->p_vaddr = addr; | 1357 | |
1362 | phdr->p_paddr = 0; | 1358 | if (!gelf_update_phdr(kcore->elf, idx, &phdr)) |
1363 | phdr->p_filesz = len; | ||
1364 | phdr->p_memsz = len; | ||
1365 | phdr->p_align = page_size; | ||
1366 | |||
1367 | if (!gelf_update_phdr(kcore->elf, idx, phdr)) | ||
1368 | return -1; | 1359 | return -1; |
1369 | 1360 | ||
1370 | return 0; | 1361 | return 0; |
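
kcore__add_phdr() now fills the program header locally with a designated initializer and pushes it with gelf_update_phdr(), instead of first reading back a not-yet-written header with gelf_getphdr(). The same pattern in isolation (page_size is passed in here rather than taken from the perf global):

#include <gelf.h>

static int add_load_phdr(Elf *elf, int idx, GElf_Off offset, GElf_Addr addr,
                         GElf_Xword len, GElf_Xword page_size)
{
        GElf_Phdr phdr = {
                .p_type   = PT_LOAD,
                .p_flags  = PF_R | PF_W | PF_X,
                .p_offset = offset,
                .p_vaddr  = addr,
                .p_filesz = len,
                .p_memsz  = len,
                .p_align  = page_size,
        };

        /* gelf_update_phdr() returns non-zero on success. */
        return gelf_update_phdr(elf, idx, &phdr) ? 0 : -1;
}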
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 7acafb3c5592..c2cd9bf2348b 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c | |||
@@ -709,7 +709,7 @@ bool find_process(const char *name) | |||
709 | 709 | ||
710 | dir = opendir(procfs__mountpoint()); | 710 | dir = opendir(procfs__mountpoint()); |
711 | if (!dir) | 711 | if (!dir) |
712 | return -1; | 712 | return false; |
713 | 713 | ||
714 | /* Walk through the directory. */ | 714 | /* Walk through the directory. */ |
715 | while (ret && (d = readdir(dir)) != NULL) { | 715 | while (ret && (d = readdir(dir)) != NULL) { |
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 9655cb49c7cb..bde0ef1a63df 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
@@ -71,8 +71,11 @@ unsigned int extra_msr_offset32; | |||
71 | unsigned int extra_msr_offset64; | 71 | unsigned int extra_msr_offset64; |
72 | unsigned int extra_delta_offset32; | 72 | unsigned int extra_delta_offset32; |
73 | unsigned int extra_delta_offset64; | 73 | unsigned int extra_delta_offset64; |
74 | unsigned int aperf_mperf_multiplier = 1; | ||
74 | int do_smi; | 75 | int do_smi; |
75 | double bclk; | 76 | double bclk; |
77 | double base_hz; | ||
78 | double tsc_tweak = 1.0; | ||
76 | unsigned int show_pkg; | 79 | unsigned int show_pkg; |
77 | unsigned int show_core; | 80 | unsigned int show_core; |
78 | unsigned int show_cpu; | 81 | unsigned int show_cpu; |
@@ -502,7 +505,7 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
502 | /* %Busy */ | 505 | /* %Busy */ |
503 | if (has_aperf) { | 506 | if (has_aperf) { |
504 | if (!skip_c0) | 507 | if (!skip_c0) |
505 | outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc); | 508 | outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); |
506 | else | 509 | else |
507 | outp += sprintf(outp, "********"); | 510 | outp += sprintf(outp, "********"); |
508 | } | 511 | } |
@@ -510,7 +513,7 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
510 | /* Bzy_MHz */ | 513 | /* Bzy_MHz */ |
511 | if (has_aperf) | 514 | if (has_aperf) |
512 | outp += sprintf(outp, "%8.0f", | 515 | outp += sprintf(outp, "%8.0f", |
513 | 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); | 516 | 1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float); |
514 | 517 | ||
515 | /* TSC_MHz */ | 518 | /* TSC_MHz */ |
516 | outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); | 519 | outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); |
@@ -984,6 +987,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) | |||
984 | return -3; | 987 | return -3; |
985 | if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) | 988 | if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) |
986 | return -4; | 989 | return -4; |
990 | t->aperf = t->aperf * aperf_mperf_multiplier; | ||
991 | t->mperf = t->mperf * aperf_mperf_multiplier; | ||
987 | } | 992 | } |
988 | 993 | ||
989 | if (do_smi) { | 994 | if (do_smi) { |
@@ -1149,6 +1154,19 @@ int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, | |||
1149 | int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; | 1154 | int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; |
1150 | int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; | 1155 | int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; |
1151 | 1156 | ||
1157 | |||
1158 | static void | ||
1159 | calculate_tsc_tweak() | ||
1160 | { | ||
1161 | unsigned long long msr; | ||
1162 | unsigned int base_ratio; | ||
1163 | |||
1164 | get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); | ||
1165 | base_ratio = (msr >> 8) & 0xFF; | ||
1166 | base_hz = base_ratio * bclk * 1000000; | ||
1167 | tsc_tweak = base_hz / tsc_hz; | ||
1168 | } | ||
1169 | |||
1152 | static void | 1170 | static void |
1153 | dump_nhm_platform_info(void) | 1171 | dump_nhm_platform_info(void) |
1154 | { | 1172 | { |
@@ -1926,8 +1944,6 @@ int has_config_tdp(unsigned int family, unsigned int model) | |||
1926 | 1944 | ||
1927 | switch (model) { | 1945 | switch (model) { |
1928 | case 0x3A: /* IVB */ | 1946 | case 0x3A: /* IVB */ |
1929 | case 0x3E: /* IVB Xeon */ | ||
1930 | |||
1931 | case 0x3C: /* HSW */ | 1947 | case 0x3C: /* HSW */ |
1932 | case 0x3F: /* HSX */ | 1948 | case 0x3F: /* HSX */ |
1933 | case 0x45: /* HSW */ | 1949 | case 0x45: /* HSW */ |
@@ -2543,6 +2559,13 @@ int is_knl(unsigned int family, unsigned int model) | |||
2543 | return 0; | 2559 | return 0; |
2544 | } | 2560 | } |
2545 | 2561 | ||
2562 | unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) | ||
2563 | { | ||
2564 | if (is_knl(family, model)) | ||
2565 | return 1024; | ||
2566 | return 1; | ||
2567 | } | ||
2568 | |||
2546 | #define SLM_BCLK_FREQS 5 | 2569 | #define SLM_BCLK_FREQS 5 |
2547 | double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; | 2570 | double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; |
2548 | 2571 | ||
@@ -2744,6 +2767,9 @@ void process_cpuid() | |||
2744 | } | 2767 | } |
2745 | } | 2768 | } |
2746 | 2769 | ||
2770 | if (has_aperf) | ||
2771 | aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); | ||
2772 | |||
2747 | do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); | 2773 | do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); |
2748 | do_snb_cstates = has_snb_msrs(family, model); | 2774 | do_snb_cstates = has_snb_msrs(family, model); |
2749 | do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); | 2775 | do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); |
@@ -2762,6 +2788,9 @@ void process_cpuid() | |||
2762 | if (debug) | 2788 | if (debug) |
2763 | dump_cstate_pstate_config_info(); | 2789 | dump_cstate_pstate_config_info(); |
2764 | 2790 | ||
2791 | if (has_skl_msrs(family, model)) | ||
2792 | calculate_tsc_tweak(); | ||
2793 | |||
2765 | return; | 2794 | return; |
2766 | } | 2795 | } |
2767 | 2796 | ||
@@ -3090,7 +3119,7 @@ int get_and_dump_counters(void) | |||
3090 | } | 3119 | } |
3091 | 3120 | ||
3092 | void print_version() { | 3121 | void print_version() { |
3093 | fprintf(stderr, "turbostat version 4.7 17-June, 2015" | 3122 | fprintf(stderr, "turbostat version 4.8 26-Sep, 2015" |
3094 | " - Len Brown <lenb@kernel.org>\n"); | 3123 | " - Len Brown <lenb@kernel.org>\n"); |
3095 | } | 3124 | } |
3096 | 3125 | ||
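
Two scaling factors are introduced above: tsc_tweak = base_hz / tsc_hz corrects %Busy and Bzy_MHz on parts (SKL) whose TSC does not tick at the base frequency, and aperf_mperf_multiplier rescales KNL's APERF/MPERF, which count in units of 1024. A hedged restatement of the two derived columns with the new factor applied (variable names mirror the counters; units fixed at MHz):

/* %Busy and Bzy_MHz as computed after this patch (illustrative). */
static double busy_percent(double mperf, double tsc, double tsc_tweak)
{
        return 100.0 * mperf / tsc / tsc_tweak;
}

static double bzy_mhz(double tsc, double aperf, double mperf,
                      double tsc_tweak, double interval_sec)
{
        return tsc * tsc_tweak / 1000000.0 * aperf / mperf / interval_sec;
}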
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 89b05e2222c9..cfe121353eec 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
@@ -16,12 +16,12 @@ TARGETS += powerpc | |||
16 | TARGETS += ptrace | 16 | TARGETS += ptrace |
17 | TARGETS += seccomp | 17 | TARGETS += seccomp |
18 | TARGETS += size | 18 | TARGETS += size |
19 | TARGETS += static_keys | ||
19 | TARGETS += sysctl | 20 | TARGETS += sysctl |
20 | ifneq (1, $(quicktest)) | 21 | ifneq (1, $(quicktest)) |
21 | TARGETS += timers | 22 | TARGETS += timers |
22 | endif | 23 | endif |
23 | TARGETS += user | 24 | TARGETS += user |
24 | TARGETS += jumplabel | ||
25 | TARGETS += vm | 25 | TARGETS += vm |
26 | TARGETS += x86 | 26 | TARGETS += x86 |
27 | TARGETS += zram | 27 | TARGETS += zram |
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile index 6b76bfdc847e..4e400eb83657 100644 --- a/tools/testing/selftests/exec/Makefile +++ b/tools/testing/selftests/exec/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | CFLAGS = -Wall | 1 | CFLAGS = -Wall |
2 | BINARIES = execveat | 2 | BINARIES = execveat |
3 | DEPS = execveat.symlink execveat.denatured script | 3 | DEPS = execveat.symlink execveat.denatured script subdir |
4 | all: $(BINARIES) $(DEPS) | 4 | all: $(BINARIES) $(DEPS) |
5 | 5 | ||
6 | subdir: | 6 | subdir: |
@@ -22,7 +22,5 @@ TEST_FILES := $(DEPS) | |||
22 | 22 | ||
23 | include ../lib.mk | 23 | include ../lib.mk |
24 | 24 | ||
25 | override EMIT_TESTS := echo "mkdir -p subdir; (./execveat && echo \"selftests: execveat [PASS]\") || echo \"selftests: execveat [FAIL]\"" | ||
26 | |||
27 | clean: | 25 | clean: |
28 | rm -rf $(BINARIES) $(DEPS) subdir.moved execveat.moved xxxxx* | 26 | rm -rf $(BINARIES) $(DEPS) subdir.moved execveat.moved xxxxx* |
diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile index 0acbeca47225..4e6ed13e7f66 100644 --- a/tools/testing/selftests/ftrace/Makefile +++ b/tools/testing/selftests/ftrace/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | all: | 1 | all: |
2 | 2 | ||
3 | TEST_PROGS := ftracetest | 3 | TEST_PROGS := ftracetest |
4 | TEST_DIRS := test.d/ | 4 | TEST_DIRS := test.d |
5 | 5 | ||
6 | include ../lib.mk | 6 | include ../lib.mk |
7 | 7 | ||
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 97f1c6742066..50a93f5f13d6 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk | |||
@@ -12,13 +12,10 @@ run_tests: all | |||
12 | $(RUN_TESTS) | 12 | $(RUN_TESTS) |
13 | 13 | ||
14 | define INSTALL_RULE | 14 | define INSTALL_RULE |
15 | @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ | 15 | @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ |
16 | mkdir -p $(INSTALL_PATH); \ | 16 | mkdir -p ${INSTALL_PATH}; \ |
17 | for TEST_DIR in $(TEST_DIRS); do \ | 17 | echo "rsync -a $(TEST_DIRS) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \ |
18 | cp -r $$TEST_DIR $(INSTALL_PATH); \ | 18 | rsync -a $(TEST_DIRS) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \ |
19 | done; \ | ||
20 | echo "install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)"; \ | ||
21 | install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES); \ | ||
22 | fi | 19 | fi |
23 | endef | 20 | endef |
24 | 21 | ||
diff --git a/tools/testing/selftests/membarrier/Makefile b/tools/testing/selftests/membarrier/Makefile index 877a50355d7f..a1a97085847d 100644 --- a/tools/testing/selftests/membarrier/Makefile +++ b/tools/testing/selftests/membarrier/Makefile | |||
@@ -1,11 +1,10 @@ | |||
1 | CFLAGS += -g -I../../../../usr/include/ | 1 | CFLAGS += -g -I../../../../usr/include/ |
2 | 2 | ||
3 | all: | ||
4 | $(CC) $(CFLAGS) membarrier_test.c -o membarrier_test | ||
5 | |||
6 | TEST_PROGS := membarrier_test | 3 | TEST_PROGS := membarrier_test |
7 | 4 | ||
5 | all: $(TEST_PROGS) | ||
6 | |||
8 | include ../lib.mk | 7 | include ../lib.mk |
9 | 8 | ||
10 | clean: | 9 | clean: |
11 | $(RM) membarrier_test | 10 | $(RM) $(TEST_PROGS) |
diff --git a/tools/testing/selftests/membarrier/membarrier_test.c b/tools/testing/selftests/membarrier/membarrier_test.c index dde312508007..535f0fef4d0b 100644 --- a/tools/testing/selftests/membarrier/membarrier_test.c +++ b/tools/testing/selftests/membarrier/membarrier_test.c | |||
@@ -1,9 +1,6 @@ | |||
1 | #define _GNU_SOURCE | 1 | #define _GNU_SOURCE |
2 | #define __EXPORTED_HEADERS__ | ||
3 | |||
4 | #include <linux/membarrier.h> | 2 | #include <linux/membarrier.h> |
5 | #include <asm-generic/unistd.h> | 3 | #include <syscall.h> |
6 | #include <sys/syscall.h> | ||
7 | #include <stdio.h> | 4 | #include <stdio.h> |
8 | #include <errno.h> | 5 | #include <errno.h> |
9 | #include <string.h> | 6 | #include <string.h> |
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile index 0e3b41eb85cd..eebac29acbd9 100644 --- a/tools/testing/selftests/mqueue/Makefile +++ b/tools/testing/selftests/mqueue/Makefile | |||
@@ -1,8 +1,8 @@ | |||
1 | CFLAGS = -O2 | 1 | CFLAGS += -O2 |
2 | LDLIBS = -lrt -lpthread -lpopt | ||
3 | TEST_PROGS := mq_open_tests mq_perf_tests | ||
2 | 4 | ||
3 | all: | 5 | all: $(TEST_PROGS) |
4 | $(CC) $(CFLAGS) mq_open_tests.c -o mq_open_tests -lrt | ||
5 | $(CC) $(CFLAGS) -o mq_perf_tests mq_perf_tests.c -lrt -lpthread -lpopt | ||
6 | 6 | ||
7 | include ../lib.mk | 7 | include ../lib.mk |
8 | 8 | ||
@@ -11,8 +11,6 @@ override define RUN_TESTS | |||
11 | @./mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" | 11 | @./mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" |
12 | endef | 12 | endef |
13 | 13 | ||
14 | TEST_PROGS := mq_open_tests mq_perf_tests | ||
15 | |||
16 | override define EMIT_TESTS | 14 | override define EMIT_TESTS |
17 | echo "./mq_open_tests /test1 || echo \"selftests: mq_open_tests [FAIL]\"" | 15 | echo "./mq_open_tests /test1 || echo \"selftests: mq_open_tests [FAIL]\"" |
18 | echo "./mq_perf_tests || echo \"selftests: mq_perf_tests [FAIL]\"" | 16 | echo "./mq_perf_tests || echo \"selftests: mq_perf_tests [FAIL]\"" |
diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c index d1b647509596..6cae06117b55 100644 --- a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c +++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c | |||
@@ -25,10 +25,19 @@ | |||
25 | 25 | ||
26 | #define FIXUP_SECTION ".ex_fixup" | 26 | #define FIXUP_SECTION ".ex_fixup" |
27 | 27 | ||
28 | static inline unsigned long __fls(unsigned long x); | ||
29 | |||
28 | #include "word-at-a-time.h" | 30 | #include "word-at-a-time.h" |
29 | 31 | ||
30 | #include "utils.h" | 32 | #include "utils.h" |
31 | 33 | ||
34 | static inline unsigned long __fls(unsigned long x) | ||
35 | { | ||
36 | int lz; | ||
37 | |||
38 | asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x)); | ||
39 | return sizeof(unsigned long) - 1 - lz; | ||
40 | } | ||
32 | 41 | ||
33 | static int page_size; | 42 | static int page_size; |
34 | static char *mem_region; | 43 | static char *mem_region; |
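
word-at-a-time.h expects an __fls() helper, which the selftest now supplies via the PowerPC count-leading-zeros instruction. For comparison only, a portable sketch of the conventional __fls() semantics (index of the most significant set bit, undefined for x == 0) using the GCC builtin; this is an assumption about the expected contract, not a drop-in replacement for the asm version:

static inline unsigned long __fls_generic(unsigned long x)
{
        /* __builtin_clzl(0) is undefined, the same caveat applies here. */
        return sizeof(unsigned long) * 8 - 1 - (unsigned long)__builtin_clzl(x);
}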
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index a004b4cce99e..770f47adf295 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
@@ -1210,6 +1210,10 @@ TEST_F(TRACE_poke, getpid_runs_normally) | |||
1210 | # define ARCH_REGS struct pt_regs | 1210 | # define ARCH_REGS struct pt_regs |
1211 | # define SYSCALL_NUM gpr[0] | 1211 | # define SYSCALL_NUM gpr[0] |
1212 | # define SYSCALL_RET gpr[3] | 1212 | # define SYSCALL_RET gpr[3] |
1213 | #elif defined(__s390__) | ||
1214 | # define ARCH_REGS s390_regs | ||
1215 | # define SYSCALL_NUM gprs[2] | ||
1216 | # define SYSCALL_RET gprs[2] | ||
1213 | #else | 1217 | #else |
1214 | # error "Do not know how to find your architecture's registers and syscalls" | 1218 | # error "Do not know how to find your architecture's registers and syscalls" |
1215 | #endif | 1219 | #endif |
@@ -1243,7 +1247,8 @@ void change_syscall(struct __test_metadata *_metadata, | |||
1243 | ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov); | 1247 | ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov); |
1244 | EXPECT_EQ(0, ret); | 1248 | EXPECT_EQ(0, ret); |
1245 | 1249 | ||
1246 | #if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__) | 1250 | #if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \ |
1251 | defined(__powerpc__) || defined(__s390__) | ||
1247 | { | 1252 | { |
1248 | regs.SYSCALL_NUM = syscall; | 1253 | regs.SYSCALL_NUM = syscall; |
1249 | } | 1254 | } |
@@ -1281,17 +1286,21 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee, | |||
1281 | ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); | 1286 | ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); |
1282 | EXPECT_EQ(0, ret); | 1287 | EXPECT_EQ(0, ret); |
1283 | 1288 | ||
1289 | /* Validate and take action on expected syscalls. */ | ||
1284 | switch (msg) { | 1290 | switch (msg) { |
1285 | case 0x1002: | 1291 | case 0x1002: |
1286 | /* change getpid to getppid. */ | 1292 | /* change getpid to getppid. */ |
1293 | EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); | ||
1287 | change_syscall(_metadata, tracee, __NR_getppid); | 1294 | change_syscall(_metadata, tracee, __NR_getppid); |
1288 | break; | 1295 | break; |
1289 | case 0x1003: | 1296 | case 0x1003: |
1290 | /* skip gettid. */ | 1297 | /* skip gettid. */ |
1298 | EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); | ||
1291 | change_syscall(_metadata, tracee, -1); | 1299 | change_syscall(_metadata, tracee, -1); |
1292 | break; | 1300 | break; |
1293 | case 0x1004: | 1301 | case 0x1004: |
1294 | /* do nothing (allow getppid) */ | 1302 | /* do nothing (allow getppid) */ |
1303 | EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); | ||
1295 | break; | 1304 | break; |
1296 | default: | 1305 | default: |
1297 | EXPECT_EQ(0, msg) { | 1306 | EXPECT_EQ(0, msg) { |
@@ -1409,6 +1418,8 @@ TEST_F(TRACE_syscall, syscall_dropped) | |||
1409 | # define __NR_seccomp 277 | 1418 | # define __NR_seccomp 277 |
1410 | # elif defined(__powerpc__) | 1419 | # elif defined(__powerpc__) |
1411 | # define __NR_seccomp 358 | 1420 | # define __NR_seccomp 358 |
1421 | # elif defined(__s390__) | ||
1422 | # define __NR_seccomp 348 | ||
1412 | # else | 1423 | # else |
1413 | # warning "seccomp syscall number unknown for this architecture" | 1424 | # warning "seccomp syscall number unknown for this architecture" |
1414 | # define __NR_seccomp 0xffff | 1425 | # define __NR_seccomp 0xffff |
@@ -1453,6 +1464,9 @@ TEST(seccomp_syscall) | |||
1453 | 1464 | ||
1454 | /* Reject insane operation. */ | 1465 | /* Reject insane operation. */ |
1455 | ret = seccomp(-1, 0, &prog); | 1466 | ret = seccomp(-1, 0, &prog); |
1467 | ASSERT_NE(ENOSYS, errno) { | ||
1468 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
1469 | } | ||
1456 | EXPECT_EQ(EINVAL, errno) { | 1470 | EXPECT_EQ(EINVAL, errno) { |
1457 | TH_LOG("Did not reject crazy op value!"); | 1471 | TH_LOG("Did not reject crazy op value!"); |
1458 | } | 1472 | } |
@@ -1501,6 +1515,9 @@ TEST(seccomp_syscall_mode_lock) | |||
1501 | } | 1515 | } |
1502 | 1516 | ||
1503 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); | 1517 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); |
1518 | ASSERT_NE(ENOSYS, errno) { | ||
1519 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
1520 | } | ||
1504 | EXPECT_EQ(0, ret) { | 1521 | EXPECT_EQ(0, ret) { |
1505 | TH_LOG("Could not install filter!"); | 1522 | TH_LOG("Could not install filter!"); |
1506 | } | 1523 | } |
@@ -1535,6 +1552,9 @@ TEST(TSYNC_first) | |||
1535 | 1552 | ||
1536 | ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, | 1553 | ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, |
1537 | &prog); | 1554 | &prog); |
1555 | ASSERT_NE(ENOSYS, errno) { | ||
1556 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
1557 | } | ||
1538 | EXPECT_EQ(0, ret) { | 1558 | EXPECT_EQ(0, ret) { |
1539 | TH_LOG("Could not install initial filter with TSYNC!"); | 1559 | TH_LOG("Could not install initial filter with TSYNC!"); |
1540 | } | 1560 | } |
@@ -1694,6 +1714,9 @@ TEST_F(TSYNC, siblings_fail_prctl) | |||
1694 | 1714 | ||
1695 | /* Check prctl failure detection by requesting sib 0 diverge. */ | 1715 | /* Check prctl failure detection by requesting sib 0 diverge. */ |
1696 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); | 1716 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); |
1717 | ASSERT_NE(ENOSYS, errno) { | ||
1718 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
1719 | } | ||
1697 | ASSERT_EQ(0, ret) { | 1720 | ASSERT_EQ(0, ret) { |
1698 | TH_LOG("setting filter failed"); | 1721 | TH_LOG("setting filter failed"); |
1699 | } | 1722 | } |
@@ -1731,6 +1754,9 @@ TEST_F(TSYNC, two_siblings_with_ancestor) | |||
1731 | } | 1754 | } |
1732 | 1755 | ||
1733 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); | 1756 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); |
1757 | ASSERT_NE(ENOSYS, errno) { | ||
1758 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
1759 | } | ||
1734 | ASSERT_EQ(0, ret) { | 1760 | ASSERT_EQ(0, ret) { |
1735 | TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); | 1761 | TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); |
1736 | } | 1762 | } |
@@ -1805,6 +1831,9 @@ TEST_F(TSYNC, two_siblings_with_no_filter) | |||
1805 | 1831 | ||
1806 | ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, | 1832 | ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, |
1807 | &self->apply_prog); | 1833 | &self->apply_prog); |
1834 | ASSERT_NE(ENOSYS, errno) { | ||
1835 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
1836 | } | ||
1808 | ASSERT_EQ(0, ret) { | 1837 | ASSERT_EQ(0, ret) { |
1809 | TH_LOG("Could install filter on all threads!"); | 1838 | TH_LOG("Could install filter on all threads!"); |
1810 | } | 1839 | } |
@@ -1833,6 +1862,9 @@ TEST_F(TSYNC, two_siblings_with_one_divergence) | |||
1833 | } | 1862 | } |
1834 | 1863 | ||
1835 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); | 1864 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); |
1865 | ASSERT_NE(ENOSYS, errno) { | ||
1866 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
1867 | } | ||
1836 | ASSERT_EQ(0, ret) { | 1868 | ASSERT_EQ(0, ret) { |
1837 | TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); | 1869 | TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); |
1838 | } | 1870 | } |
@@ -1890,6 +1922,9 @@ TEST_F(TSYNC, two_siblings_not_under_filter) | |||
1890 | } | 1922 | } |
1891 | 1923 | ||
1892 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); | 1924 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); |
1925 | ASSERT_NE(ENOSYS, errno) { | ||
1926 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
1927 | } | ||
1893 | ASSERT_EQ(0, ret) { | 1928 | ASSERT_EQ(0, ret) { |
1894 | TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); | 1929 | TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); |
1895 | } | 1930 | } |
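The ASSERT_NE(ENOSYS, errno) checks added throughout these tests separate "this kernel has no seccomp(2) syscall" from a real failure: the probing call uses arguments a supporting kernel must reject with EINVAL, so ENOSYS can only mean the syscall is absent. A standalone sketch of the same probe, assuming __NR_seccomp is provided by the installed headers and invoking it through raw syscall(2):

    /* Hedged sketch: detect whether the running kernel implements
     * seccomp(2).  An invalid operation is used on purpose: a kernel
     * with the syscall answers EINVAL, a kernel without it answers
     * ENOSYS.  Assumes __NR_seccomp is defined by <sys/syscall.h>. */
    #include <stdio.h>
    #include <errno.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            errno = 0;
            syscall(__NR_seccomp, -1, 0, NULL);
            if (errno == ENOSYS) {
                    printf("skip: seccomp(2) not supported by this kernel\n");
                    return 0;
            }
            printf("seccomp(2) present, probe errno=%d (expect EINVAL=%d)\n",
                   errno, EINVAL);
            return 0;
    }
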
diff --git a/tools/testing/selftests/seccomp/test_harness.h b/tools/testing/selftests/seccomp/test_harness.h index 977a6afc4489..fb2841601f2f 100644 --- a/tools/testing/selftests/seccomp/test_harness.h +++ b/tools/testing/selftests/seccomp/test_harness.h | |||
@@ -370,11 +370,8 @@ | |||
370 | __typeof__(_expected) __exp = (_expected); \ | 370 | __typeof__(_expected) __exp = (_expected); \ |
371 | __typeof__(_seen) __seen = (_seen); \ | 371 | __typeof__(_seen) __seen = (_seen); \ |
372 | if (!(__exp _t __seen)) { \ | 372 | if (!(__exp _t __seen)) { \ |
373 | unsigned long long __exp_print = 0; \ | 373 | unsigned long long __exp_print = (unsigned long long)__exp; \ |
374 | unsigned long long __seen_print = 0; \ | 374 | unsigned long long __seen_print = (unsigned long long)__seen; \ |
375 | /* Avoid casting complaints the scariest way we can. */ \ | ||
376 | memcpy(&__exp_print, &__exp, sizeof(__exp)); \ | ||
377 | memcpy(&__seen_print, &__seen, sizeof(__seen)); \ | ||
378 | __TH_LOG("Expected %s (%llu) %s %s (%llu)", \ | 375 | __TH_LOG("Expected %s (%llu) %s %s (%llu)", \ |
379 | #_expected, __exp_print, #_t, \ | 376 | #_expected, __exp_print, #_t, \ |
380 | #_seen, __seen_print); \ | 377 | #_seen, __seen_print); \ |
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index d36fab7d8ebd..3c53cac15de1 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | # Makefile for vm selftests | 1 | # Makefile for vm selftests |
2 | 2 | ||
3 | CFLAGS = -Wall | 3 | CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS) |
4 | BINARIES = compaction_test | 4 | BINARIES = compaction_test |
5 | BINARIES += hugepage-mmap | 5 | BINARIES += hugepage-mmap |
6 | BINARIES += hugepage-shm | 6 | BINARIES += hugepage-shm |
@@ -12,8 +12,11 @@ BINARIES += userfaultfd | |||
12 | all: $(BINARIES) | 12 | all: $(BINARIES) |
13 | %: %.c | 13 | %: %.c |
14 | $(CC) $(CFLAGS) -o $@ $^ -lrt | 14 | $(CC) $(CFLAGS) -o $@ $^ -lrt |
15 | userfaultfd: userfaultfd.c | 15 | userfaultfd: userfaultfd.c ../../../../usr/include/linux/kernel.h |
16 | $(CC) $(CFLAGS) -O2 -o $@ $^ -lpthread | 16 | $(CC) $(CFLAGS) -O2 -o $@ $< -lpthread |
17 | |||
18 | ../../../../usr/include/linux/kernel.h: | ||
19 | make -C ../../../.. headers_install | ||
17 | 20 | ||
18 | TEST_PROGS := run_vmtests | 21 | TEST_PROGS := run_vmtests |
19 | TEST_FILES := $(BINARIES) | 22 | TEST_FILES := $(BINARIES) |
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index 2c7cca6f26a4..d77ed41b2094 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c | |||
@@ -64,17 +64,9 @@ | |||
64 | #include <sys/syscall.h> | 64 | #include <sys/syscall.h> |
65 | #include <sys/ioctl.h> | 65 | #include <sys/ioctl.h> |
66 | #include <pthread.h> | 66 | #include <pthread.h> |
67 | #include "../../../../include/uapi/linux/userfaultfd.h" | 67 | #include <linux/userfaultfd.h> |
68 | 68 | ||
69 | #ifdef __x86_64__ | 69 | #ifdef __NR_userfaultfd |
70 | #define __NR_userfaultfd 323 | ||
71 | #elif defined(__i386__) | ||
72 | #define __NR_userfaultfd 374 | ||
73 | #elif defined(__powewrpc__) | ||
74 | #define __NR_userfaultfd 364 | ||
75 | #else | ||
76 | #error "missing __NR_userfaultfd definition" | ||
77 | #endif | ||
78 | 70 | ||
79 | static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; | 71 | static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; |
80 | 72 | ||
@@ -430,7 +422,7 @@ static int userfaultfd_stress(void) | |||
430 | struct uffdio_register uffdio_register; | 422 | struct uffdio_register uffdio_register; |
431 | struct uffdio_api uffdio_api; | 423 | struct uffdio_api uffdio_api; |
432 | unsigned long cpu; | 424 | unsigned long cpu; |
433 | int uffd_flags; | 425 | int uffd_flags, err; |
434 | unsigned long userfaults[nr_cpus]; | 426 | unsigned long userfaults[nr_cpus]; |
435 | 427 | ||
436 | if (posix_memalign(&area, page_size, nr_pages * page_size)) { | 428 | if (posix_memalign(&area, page_size, nr_pages * page_size)) { |
@@ -473,6 +465,14 @@ static int userfaultfd_stress(void) | |||
473 | *area_mutex(area_src, nr) = (pthread_mutex_t) | 465 | *area_mutex(area_src, nr) = (pthread_mutex_t) |
474 | PTHREAD_MUTEX_INITIALIZER; | 466 | PTHREAD_MUTEX_INITIALIZER; |
475 | count_verify[nr] = *area_count(area_src, nr) = 1; | 467 | count_verify[nr] = *area_count(area_src, nr) = 1; |
468 | /* | ||
469 | * In the transition between 255 to 256, powerpc will | ||
470 | * read out of order in my_bcmp and see both bytes as | ||
471 | * zero, so leave a placeholder below always non-zero | ||
472 | * after the count, to avoid my_bcmp to trigger false | ||
473 | * positives. | ||
474 | */ | ||
475 | *(area_count(area_src, nr) + 1) = 1; | ||
476 | } | 476 | } |
477 | 477 | ||
478 | pipefd = malloc(sizeof(int) * nr_cpus * 2); | 478 | pipefd = malloc(sizeof(int) * nr_cpus * 2); |
@@ -499,6 +499,7 @@ static int userfaultfd_stress(void) | |||
499 | pthread_attr_init(&attr); | 499 | pthread_attr_init(&attr); |
500 | pthread_attr_setstacksize(&attr, 16*1024*1024); | 500 | pthread_attr_setstacksize(&attr, 16*1024*1024); |
501 | 501 | ||
502 | err = 0; | ||
502 | while (bounces--) { | 503 | while (bounces--) { |
503 | unsigned long expected_ioctls; | 504 | unsigned long expected_ioctls; |
504 | 505 | ||
@@ -579,20 +580,13 @@ static int userfaultfd_stress(void) | |||
579 | /* verification */ | 580 | /* verification */ |
580 | if (bounces & BOUNCE_VERIFY) { | 581 | if (bounces & BOUNCE_VERIFY) { |
581 | for (nr = 0; nr < nr_pages; nr++) { | 582 | for (nr = 0; nr < nr_pages; nr++) { |
582 | if (my_bcmp(area_dst, | ||
583 | area_dst + nr * page_size, | ||
584 | sizeof(pthread_mutex_t))) { | ||
585 | fprintf(stderr, | ||
586 | "error mutex 2 %lu\n", | ||
587 | nr); | ||
588 | bounces = 0; | ||
589 | } | ||
590 | if (*area_count(area_dst, nr) != count_verify[nr]) { | 583 | if (*area_count(area_dst, nr) != count_verify[nr]) { |
591 | fprintf(stderr, | 584 | fprintf(stderr, |
592 | "error area_count %Lu %Lu %lu\n", | 585 | "error area_count %Lu %Lu %lu\n", |
593 | *area_count(area_src, nr), | 586 | *area_count(area_src, nr), |
594 | count_verify[nr], | 587 | count_verify[nr], |
595 | nr); | 588 | nr); |
589 | err = 1; | ||
596 | bounces = 0; | 590 | bounces = 0; |
597 | } | 591 | } |
598 | } | 592 | } |
@@ -609,7 +603,7 @@ static int userfaultfd_stress(void) | |||
609 | printf("\n"); | 603 | printf("\n"); |
610 | } | 604 | } |
611 | 605 | ||
612 | return 0; | 606 | return err; |
613 | } | 607 | } |
614 | 608 | ||
615 | int main(int argc, char **argv) | 609 | int main(int argc, char **argv) |
@@ -618,8 +612,8 @@ int main(int argc, char **argv) | |||
618 | fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1); | 612 | fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1); |
619 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); | 613 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); |
620 | page_size = sysconf(_SC_PAGE_SIZE); | 614 | page_size = sysconf(_SC_PAGE_SIZE); |
621 | if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) > | 615 | if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2 |
622 | page_size) | 616 | > page_size) |
623 | fprintf(stderr, "Impossible to run this test\n"), exit(2); | 617 | fprintf(stderr, "Impossible to run this test\n"), exit(2); |
624 | nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size / | 618 | nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size / |
625 | nr_cpus; | 619 | nr_cpus; |
@@ -637,3 +631,15 @@ int main(int argc, char **argv) | |||
637 | nr_pages, nr_pages_per_cpu); | 631 | nr_pages, nr_pages_per_cpu); |
638 | return userfaultfd_stress(); | 632 | return userfaultfd_stress(); |
639 | } | 633 | } |
634 | |||
635 | #else /* __NR_userfaultfd */ | ||
636 | |||
637 | #warning "missing __NR_userfaultfd definition" | ||
638 | |||
639 | int main(void) | ||
640 | { | ||
641 | printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); | ||
642 | return 0; | ||
643 | } | ||
644 | |||
645 | #endif /* __NR_userfaultfd */ | ||
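With the test now taking <linux/userfaultfd.h> and __NR_userfaultfd from the installed kernel headers, its open-and-handshake step reduces to roughly the following (a sketch with error handling trimmed; the syscall number and the UFFDIO_API ioctl come from those headers):

    /* Hedged sketch: open a userfaultfd and negotiate the API, the same
     * first step the stress test performs before registering memory
     * ranges.  Assumes headers_install has provided __NR_userfaultfd
     * and <linux/userfaultfd.h>. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/userfaultfd.h>

    int main(void)
    {
    #ifdef __NR_userfaultfd
            struct uffdio_api api = { .api = UFFD_API };
            int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

            if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0) {
                    perror("userfaultfd");
                    return 1;
            }
            printf("userfaultfd features: 0x%llx\n",
                   (unsigned long long)api.features);
            return 0;
    #else
            printf("skip: __NR_userfaultfd not defined\n");
            return 0;
    #endif
    }
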
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c index 9a43a59a9bb4..421c607a8856 100644 --- a/tools/testing/selftests/x86/entry_from_vm86.c +++ b/tools/testing/selftests/x86/entry_from_vm86.c | |||
@@ -116,8 +116,9 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip, | |||
116 | v86->regs.eip = eip; | 116 | v86->regs.eip = eip; |
117 | ret = vm86(VM86_ENTER, v86); | 117 | ret = vm86(VM86_ENTER, v86); |
118 | 118 | ||
119 | if (ret == -1 && errno == ENOSYS) { | 119 | if (ret == -1 && (errno == ENOSYS || errno == EPERM)) { |
120 | printf("[SKIP]\tvm86 not supported\n"); | 120 | printf("[SKIP]\tvm86 %s\n", |
121 | errno == ENOSYS ? "not supported" : "not allowed"); | ||
121 | return false; | 122 | return false; |
122 | } | 123 | } |
123 | 124 | ||
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh index 20de9a761269..683a292e3290 100755 --- a/tools/testing/selftests/zram/zram.sh +++ b/tools/testing/selftests/zram/zram.sh | |||
@@ -1,15 +1,7 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | TCID="zram.sh" | 2 | TCID="zram.sh" |
3 | 3 | ||
4 | check_prereqs() | 4 | . ./zram_lib.sh |
5 | { | ||
6 | local msg="skip all tests:" | ||
7 | |||
8 | if [ $UID != 0 ]; then | ||
9 | echo $msg must be run as root >&2 | ||
10 | exit 0 | ||
11 | fi | ||
12 | } | ||
13 | 5 | ||
14 | run_zram () { | 6 | run_zram () { |
15 | echo "--------------------" | 7 | echo "--------------------" |
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh index 424e68ed1487..f6a9c73e7a44 100755 --- a/tools/testing/selftests/zram/zram_lib.sh +++ b/tools/testing/selftests/zram/zram_lib.sh | |||
@@ -23,8 +23,9 @@ trap INT | |||
23 | check_prereqs() | 23 | check_prereqs() |
24 | { | 24 | { |
25 | local msg="skip all tests:" | 25 | local msg="skip all tests:" |
26 | local uid=$(id -u) | ||
26 | 27 | ||
27 | if [ $UID != 0 ]; then | 28 | if [ $uid -ne 0 ]; then |
28 | echo $msg must be run as root >&2 | 29 | echo $msg must be run as root >&2 |
29 | exit 0 | 30 | exit 0 |
30 | fi | 31 | fi |
diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile index 505ad51b3b51..39c89a5ea990 100644 --- a/tools/virtio/Makefile +++ b/tools/virtio/Makefile | |||
@@ -6,7 +6,7 @@ vringh_test: vringh_test.o vringh.o virtio_ring.o | |||
6 | CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE | 6 | CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE |
7 | vpath %.c ../../drivers/virtio ../../drivers/vhost | 7 | vpath %.c ../../drivers/virtio ../../drivers/vhost |
8 | mod: | 8 | mod: |
9 | ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test | 9 | ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V} |
10 | .PHONY: all test mod clean | 10 | .PHONY: all test mod clean |
11 | clean: | 11 | clean: |
12 | ${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \ | 12 | ${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \ |
diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h index aff61e13306c..26b7926bda88 100644 --- a/tools/virtio/asm/barrier.h +++ b/tools/virtio/asm/barrier.h | |||
@@ -3,6 +3,8 @@ | |||
3 | #define mb() __sync_synchronize() | 3 | #define mb() __sync_synchronize() |
4 | 4 | ||
5 | #define smp_mb() mb() | 5 | #define smp_mb() mb() |
6 | # define dma_rmb() barrier() | ||
7 | # define dma_wmb() barrier() | ||
6 | # define smp_rmb() barrier() | 8 | # define smp_rmb() barrier() |
7 | # define smp_wmb() barrier() | 9 | # define smp_wmb() barrier() |
8 | /* Weak barriers should be used. If not - it's a bug */ | 10 | /* Weak barriers should be used. If not - it's a bug */ |
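Here dma_rmb()/dma_wmb() follow the existing smp_rmb()/smp_wmb() in mapping to barrier(), a compiler-only fence; such a barrier is conventionally defined as an empty asm with a memory clobber, for example:

    /* Hedged sketch of a compiler-only barrier: prevents the compiler from
     * reordering or caching memory accesses across it, emits no CPU fence. */
    #define barrier() __asm__ __volatile__("" : : : "memory")
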
diff --git a/tools/virtio/linux/export.h b/tools/virtio/linux/export.h new file mode 100644 index 000000000000..416875e29254 --- /dev/null +++ b/tools/virtio/linux/export.h | |||
@@ -0,0 +1,3 @@ | |||
1 | #define EXPORT_SYMBOL_GPL(sym) extern typeof(sym) sym | ||
2 | #define EXPORT_SYMBOL(sym) extern typeof(sym) sym | ||
3 | |||
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h index 1e8ce6979c1e..0a3da64638ce 100644 --- a/tools/virtio/linux/kernel.h +++ b/tools/virtio/linux/kernel.h | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | typedef unsigned long long dma_addr_t; | 23 | typedef unsigned long long dma_addr_t; |
24 | typedef size_t __kernel_size_t; | 24 | typedef size_t __kernel_size_t; |
25 | typedef unsigned int __wsum; | ||
25 | 26 | ||
26 | struct page { | 27 | struct page { |
27 | unsigned long long dummy; | 28 | unsigned long long dummy; |
@@ -47,6 +48,13 @@ static inline void *kmalloc(size_t s, gfp_t gfp) | |||
47 | return __kmalloc_fake; | 48 | return __kmalloc_fake; |
48 | return malloc(s); | 49 | return malloc(s); |
49 | } | 50 | } |
51 | static inline void *kzalloc(size_t s, gfp_t gfp) | ||
52 | { | ||
53 | void *p = kmalloc(s, gfp); | ||
54 | |||
55 | memset(p, 0, s); | ||
56 | return p; | ||
57 | } | ||
50 | 58 | ||
51 | static inline void kfree(void *p) | 59 | static inline void kfree(void *p) |
52 | { | 60 | { |
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 76e38d231e99..48c6e1ac6827 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c | |||
@@ -200,6 +200,14 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, | |||
200 | timer->irq = irq; | 200 | timer->irq = irq; |
201 | 201 | ||
202 | /* | 202 | /* |
203 | * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8 | ||
204 | * and to 0 for ARMv7. We provide an implementation that always | ||
205 | * resets the timer to be disabled and unmasked and is compliant with | ||
206 | * the ARMv7 architecture. | ||
207 | */ | ||
208 | timer->cntv_ctl = 0; | ||
209 | |||
210 | /* | ||
203 | * Tell the VGIC that the virtual interrupt is tied to a | 211 | * Tell the VGIC that the virtual interrupt is tied to a |
204 | * physical interrupt. We do that once per VCPU. | 212 | * physical interrupt. We do that once per VCPU. |
205 | */ | 213 | */ |
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index afbf925b00f4..7dd5d62f10a1 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c | |||
@@ -288,7 +288,7 @@ int vgic_v3_probe(struct device_node *vgic_node, | |||
288 | 288 | ||
289 | vgic->vctrl_base = NULL; | 289 | vgic->vctrl_base = NULL; |
290 | vgic->type = VGIC_V3; | 290 | vgic->type = VGIC_V3; |
291 | vgic->max_gic_vcpus = KVM_MAX_VCPUS; | 291 | vgic->max_gic_vcpus = VGIC_V3_MAX_CPUS; |
292 | 292 | ||
293 | kvm_info("%s@%llx IRQ%d\n", vgic_node->name, | 293 | kvm_info("%s@%llx IRQ%d\n", vgic_node->name, |
294 | vcpu_res.start, vgic->maint_irq); | 294 | vcpu_res.start, vgic->maint_irq); |
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 9eb489a2c94c..6bd1c9bf7ae7 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
@@ -1144,26 +1144,11 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq, | |||
1144 | struct irq_phys_map *map; | 1144 | struct irq_phys_map *map; |
1145 | map = vgic_irq_map_search(vcpu, irq); | 1145 | map = vgic_irq_map_search(vcpu, irq); |
1146 | 1146 | ||
1147 | /* | ||
1148 | * If we have a mapping, and the virtual interrupt is | ||
1149 | * being injected, then we must set the state to | ||
1150 | * active in the physical world. Otherwise the | ||
1151 | * physical interrupt will fire and the guest will | ||
1152 | * exit before processing the virtual interrupt. | ||
1153 | */ | ||
1154 | if (map) { | 1147 | if (map) { |
1155 | int ret; | ||
1156 | |||
1157 | BUG_ON(!map->active); | ||
1158 | vlr.hwirq = map->phys_irq; | 1148 | vlr.hwirq = map->phys_irq; |
1159 | vlr.state |= LR_HW; | 1149 | vlr.state |= LR_HW; |
1160 | vlr.state &= ~LR_EOI_INT; | 1150 | vlr.state &= ~LR_EOI_INT; |
1161 | 1151 | ||
1162 | ret = irq_set_irqchip_state(map->irq, | ||
1163 | IRQCHIP_STATE_ACTIVE, | ||
1164 | true); | ||
1165 | WARN_ON(ret); | ||
1166 | |||
1167 | /* | 1152 | /* |
1168 | * Make sure we're not going to sample this | 1153 | * Make sure we're not going to sample this |
1169 | * again, as a HW-backed interrupt cannot be | 1154 | * again, as a HW-backed interrupt cannot be |
@@ -1255,7 +1240,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
1255 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 1240 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
1256 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1241 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
1257 | unsigned long *pa_percpu, *pa_shared; | 1242 | unsigned long *pa_percpu, *pa_shared; |
1258 | int i, vcpu_id; | 1243 | int i, vcpu_id, lr, ret; |
1259 | int overflow = 0; | 1244 | int overflow = 0; |
1260 | int nr_shared = vgic_nr_shared_irqs(dist); | 1245 | int nr_shared = vgic_nr_shared_irqs(dist); |
1261 | 1246 | ||
@@ -1310,6 +1295,31 @@ epilog: | |||
1310 | */ | 1295 | */ |
1311 | clear_bit(vcpu_id, dist->irq_pending_on_cpu); | 1296 | clear_bit(vcpu_id, dist->irq_pending_on_cpu); |
1312 | } | 1297 | } |
1298 | |||
1299 | for (lr = 0; lr < vgic->nr_lr; lr++) { | ||
1300 | struct vgic_lr vlr; | ||
1301 | |||
1302 | if (!test_bit(lr, vgic_cpu->lr_used)) | ||
1303 | continue; | ||
1304 | |||
1305 | vlr = vgic_get_lr(vcpu, lr); | ||
1306 | |||
1307 | /* | ||
1308 | * If we have a mapping, and the virtual interrupt is | ||
1309 | * presented to the guest (as pending or active), then we must | ||
1310 | * set the state to active in the physical world. See | ||
1311 | * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt. | ||
1312 | */ | ||
1313 | if (vlr.state & LR_HW) { | ||
1314 | struct irq_phys_map *map; | ||
1315 | map = vgic_irq_map_search(vcpu, vlr.irq); | ||
1316 | |||
1317 | ret = irq_set_irqchip_state(map->irq, | ||
1318 | IRQCHIP_STATE_ACTIVE, | ||
1319 | true); | ||
1320 | WARN_ON(ret); | ||
1321 | } | ||
1322 | } | ||
1313 | } | 1323 | } |
1314 | 1324 | ||
1315 | static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | 1325 | static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) |
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h index 5cbf190d238c..6bca74ca5331 100644 --- a/virt/kvm/coalesced_mmio.h +++ b/virt/kvm/coalesced_mmio.h | |||
@@ -24,9 +24,9 @@ struct kvm_coalesced_mmio_dev { | |||
24 | int kvm_coalesced_mmio_init(struct kvm *kvm); | 24 | int kvm_coalesced_mmio_init(struct kvm *kvm); |
25 | void kvm_coalesced_mmio_free(struct kvm *kvm); | 25 | void kvm_coalesced_mmio_free(struct kvm *kvm); |
26 | int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, | 26 | int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, |
27 | struct kvm_coalesced_mmio_zone *zone); | 27 | struct kvm_coalesced_mmio_zone *zone); |
28 | int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, | 28 | int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, |
29 | struct kvm_coalesced_mmio_zone *zone); | 29 | struct kvm_coalesced_mmio_zone *zone); |
30 | 30 | ||
31 | #else | 31 | #else |
32 | 32 | ||
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 9ff4193dfa49..79db45336e3a 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c | |||
@@ -771,40 +771,14 @@ static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags) | |||
771 | return KVM_MMIO_BUS; | 771 | return KVM_MMIO_BUS; |
772 | } | 772 | } |
773 | 773 | ||
774 | static int | 774 | static int kvm_assign_ioeventfd_idx(struct kvm *kvm, |
775 | kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | 775 | enum kvm_bus bus_idx, |
776 | struct kvm_ioeventfd *args) | ||
776 | { | 777 | { |
777 | enum kvm_bus bus_idx; | ||
778 | struct _ioeventfd *p; | ||
779 | struct eventfd_ctx *eventfd; | ||
780 | int ret; | ||
781 | |||
782 | bus_idx = ioeventfd_bus_from_flags(args->flags); | ||
783 | /* must be natural-word sized, or 0 to ignore length */ | ||
784 | switch (args->len) { | ||
785 | case 0: | ||
786 | case 1: | ||
787 | case 2: | ||
788 | case 4: | ||
789 | case 8: | ||
790 | break; | ||
791 | default: | ||
792 | return -EINVAL; | ||
793 | } | ||
794 | |||
795 | /* check for range overflow */ | ||
796 | if (args->addr + args->len < args->addr) | ||
797 | return -EINVAL; | ||
798 | 778 | ||
799 | /* check for extra flags that we don't understand */ | 779 | struct eventfd_ctx *eventfd; |
800 | if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK) | 780 | struct _ioeventfd *p; |
801 | return -EINVAL; | 781 | int ret; |
802 | |||
803 | /* ioeventfd with no length can't be combined with DATAMATCH */ | ||
804 | if (!args->len && | ||
805 | args->flags & (KVM_IOEVENTFD_FLAG_PIO | | ||
806 | KVM_IOEVENTFD_FLAG_DATAMATCH)) | ||
807 | return -EINVAL; | ||
808 | 782 | ||
809 | eventfd = eventfd_ctx_fdget(args->fd); | 783 | eventfd = eventfd_ctx_fdget(args->fd); |
810 | if (IS_ERR(eventfd)) | 784 | if (IS_ERR(eventfd)) |
@@ -843,16 +817,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | |||
843 | if (ret < 0) | 817 | if (ret < 0) |
844 | goto unlock_fail; | 818 | goto unlock_fail; |
845 | 819 | ||
846 | /* When length is ignored, MMIO is also put on a separate bus, for | ||
847 | * faster lookups. | ||
848 | */ | ||
849 | if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) { | ||
850 | ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS, | ||
851 | p->addr, 0, &p->dev); | ||
852 | if (ret < 0) | ||
853 | goto register_fail; | ||
854 | } | ||
855 | |||
856 | kvm->buses[bus_idx]->ioeventfd_count++; | 820 | kvm->buses[bus_idx]->ioeventfd_count++; |
857 | list_add_tail(&p->list, &kvm->ioeventfds); | 821 | list_add_tail(&p->list, &kvm->ioeventfds); |
858 | 822 | ||
@@ -860,8 +824,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | |||
860 | 824 | ||
861 | return 0; | 825 | return 0; |
862 | 826 | ||
863 | register_fail: | ||
864 | kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); | ||
865 | unlock_fail: | 827 | unlock_fail: |
866 | mutex_unlock(&kvm->slots_lock); | 828 | mutex_unlock(&kvm->slots_lock); |
867 | 829 | ||
@@ -873,14 +835,13 @@ fail: | |||
873 | } | 835 | } |
874 | 836 | ||
875 | static int | 837 | static int |
876 | kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | 838 | kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, |
839 | struct kvm_ioeventfd *args) | ||
877 | { | 840 | { |
878 | enum kvm_bus bus_idx; | ||
879 | struct _ioeventfd *p, *tmp; | 841 | struct _ioeventfd *p, *tmp; |
880 | struct eventfd_ctx *eventfd; | 842 | struct eventfd_ctx *eventfd; |
881 | int ret = -ENOENT; | 843 | int ret = -ENOENT; |
882 | 844 | ||
883 | bus_idx = ioeventfd_bus_from_flags(args->flags); | ||
884 | eventfd = eventfd_ctx_fdget(args->fd); | 845 | eventfd = eventfd_ctx_fdget(args->fd); |
885 | if (IS_ERR(eventfd)) | 846 | if (IS_ERR(eventfd)) |
886 | return PTR_ERR(eventfd); | 847 | return PTR_ERR(eventfd); |
@@ -901,10 +862,6 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | |||
901 | continue; | 862 | continue; |
902 | 863 | ||
903 | kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); | 864 | kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); |
904 | if (!p->length) { | ||
905 | kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS, | ||
906 | &p->dev); | ||
907 | } | ||
908 | kvm->buses[bus_idx]->ioeventfd_count--; | 865 | kvm->buses[bus_idx]->ioeventfd_count--; |
909 | ioeventfd_release(p); | 866 | ioeventfd_release(p); |
910 | ret = 0; | 867 | ret = 0; |
@@ -918,6 +875,71 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | |||
918 | return ret; | 875 | return ret; |
919 | } | 876 | } |
920 | 877 | ||
878 | static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | ||
879 | { | ||
880 | enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags); | ||
881 | int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); | ||
882 | |||
883 | if (!args->len && bus_idx == KVM_MMIO_BUS) | ||
884 | kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); | ||
885 | |||
886 | return ret; | ||
887 | } | ||
888 | |||
889 | static int | ||
890 | kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | ||
891 | { | ||
892 | enum kvm_bus bus_idx; | ||
893 | int ret; | ||
894 | |||
895 | bus_idx = ioeventfd_bus_from_flags(args->flags); | ||
896 | /* must be natural-word sized, or 0 to ignore length */ | ||
897 | switch (args->len) { | ||
898 | case 0: | ||
899 | case 1: | ||
900 | case 2: | ||
901 | case 4: | ||
902 | case 8: | ||
903 | break; | ||
904 | default: | ||
905 | return -EINVAL; | ||
906 | } | ||
907 | |||
908 | /* check for range overflow */ | ||
909 | if (args->addr + args->len < args->addr) | ||
910 | return -EINVAL; | ||
911 | |||
912 | /* check for extra flags that we don't understand */ | ||
913 | if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK) | ||
914 | return -EINVAL; | ||
915 | |||
916 | /* ioeventfd with no length can't be combined with DATAMATCH */ | ||
917 | if (!args->len && | ||
918 | args->flags & (KVM_IOEVENTFD_FLAG_PIO | | ||
919 | KVM_IOEVENTFD_FLAG_DATAMATCH)) | ||
920 | return -EINVAL; | ||
921 | |||
922 | ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args); | ||
923 | if (ret) | ||
924 | goto fail; | ||
925 | |||
926 | /* When length is ignored, MMIO is also put on a separate bus, for | ||
927 | * faster lookups. | ||
928 | */ | ||
929 | if (!args->len && bus_idx == KVM_MMIO_BUS) { | ||
930 | ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); | ||
931 | if (ret < 0) | ||
932 | goto fast_fail; | ||
933 | } | ||
934 | |||
935 | return 0; | ||
936 | |||
937 | fast_fail: | ||
938 | kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); | ||
939 | fail: | ||
940 | return ret; | ||
941 | } | ||
942 | |||
921 | int | 943 | int |
922 | kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | 944 | kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) |
923 | { | 945 | { |
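The split into kvm_assign_ioeventfd_idx()/kvm_deassign_ioeventfd_idx() keeps the userspace contract of KVM_IOEVENTFD unchanged while letting length-0 MMIO eventfds also be tracked on the fast bus. From userspace, registering such an eventfd looks roughly like this (a sketch: vm_fd is assumed to be a VM file descriptor from KVM_CREATE_VM, and the doorbell address is illustrative):

    /* Hedged sketch: ask KVM to signal an eventfd on guest writes to an
     * MMIO doorbell address instead of exiting to userspace.  len = 0
     * means any write size matches.  vm_fd and the address are
     * assumptions for illustration. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/eventfd.h>
    #include <linux/kvm.h>

    int register_doorbell(int vm_fd, unsigned long long addr)
    {
            struct kvm_ioeventfd args;
            int efd = eventfd(0, EFD_CLOEXEC);

            if (efd < 0)
                    return -1;

            memset(&args, 0, sizeof(args));
            args.addr  = addr;      /* guest-physical doorbell address */
            args.len   = 0;         /* zero length: match any write size */
            args.fd    = efd;
            args.flags = 0;         /* MMIO bus, no DATAMATCH */

            if (ioctl(vm_fd, KVM_IOEVENTFD, &args) < 0) {
                    perror("KVM_IOEVENTFD");
                    return -1;
            }
            return efd;
    }
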
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a25a73147f71..8db1d9361993 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -66,8 +66,8 @@ | |||
66 | MODULE_AUTHOR("Qumranet"); | 66 | MODULE_AUTHOR("Qumranet"); |
67 | MODULE_LICENSE("GPL"); | 67 | MODULE_LICENSE("GPL"); |
68 | 68 | ||
69 | /* halt polling only reduces halt latency by 5-7 us, 500us is enough */ | 69 | /* Architectures should define their poll value according to the halt latency */ |
70 | static unsigned int halt_poll_ns = 500000; | 70 | static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; |
71 | module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); | 71 | module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); |
72 | 72 | ||
73 | /* Default doubles per-vcpu halt_poll_ns. */ | 73 | /* Default doubles per-vcpu halt_poll_ns. */ |
@@ -2004,6 +2004,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) | |||
2004 | if (vcpu->halt_poll_ns) { | 2004 | if (vcpu->halt_poll_ns) { |
2005 | ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); | 2005 | ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); |
2006 | 2006 | ||
2007 | ++vcpu->stat.halt_attempted_poll; | ||
2007 | do { | 2008 | do { |
2008 | /* | 2009 | /* |
2009 | * This sets KVM_REQ_UNHALT if an interrupt | 2010 | * This sets KVM_REQ_UNHALT if an interrupt |
@@ -2043,7 +2044,8 @@ out: | |||
2043 | else if (vcpu->halt_poll_ns < halt_poll_ns && | 2044 | else if (vcpu->halt_poll_ns < halt_poll_ns && |
2044 | block_ns < halt_poll_ns) | 2045 | block_ns < halt_poll_ns) |
2045 | grow_halt_poll_ns(vcpu); | 2046 | grow_halt_poll_ns(vcpu); |
2046 | } | 2047 | } else |
2048 | vcpu->halt_poll_ns = 0; | ||
2047 | 2049 | ||
2048 | trace_kvm_vcpu_wakeup(block_ns, waited); | 2050 | trace_kvm_vcpu_wakeup(block_ns, waited); |
2049 | } | 2051 | } |
@@ -3156,10 +3158,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus) | |||
3156 | static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, | 3158 | static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, |
3157 | const struct kvm_io_range *r2) | 3159 | const struct kvm_io_range *r2) |
3158 | { | 3160 | { |
3159 | if (r1->addr < r2->addr) | 3161 | gpa_t addr1 = r1->addr; |
3162 | gpa_t addr2 = r2->addr; | ||
3163 | |||
3164 | if (addr1 < addr2) | ||
3160 | return -1; | 3165 | return -1; |
3161 | if (r1->addr + r1->len > r2->addr + r2->len) | 3166 | |
3167 | /* If r2->len == 0, match the exact address. If r2->len != 0, | ||
3168 | * accept any overlapping write. Any order is acceptable for | ||
3169 | * overlapping ranges, because kvm_io_bus_get_first_dev ensures | ||
3170 | * we process all of them. | ||
3171 | */ | ||
3172 | if (r2->len) { | ||
3173 | addr1 += r1->len; | ||
3174 | addr2 += r2->len; | ||
3175 | } | ||
3176 | |||
3177 | if (addr1 > addr2) | ||
3162 | return 1; | 3178 | return 1; |
3179 | |||
3163 | return 0; | 3180 | return 0; |
3164 | } | 3181 | } |
3165 | 3182 | ||